diff --git a/Cargo.toml b/Cargo.toml index f3c625b8a3..e7aaf8f36e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["field", "maybe_rayon", "plonky2", "starky", "util", "gen"] +members = ["field", "maybe_rayon", "plonky2", "starky", "util", "gen", "u32", "ecdsa", "ecgfp5"] resolver = "2" [workspace.dependencies] diff --git a/ecdsa/.github/workflows/continuous-integration.yml b/ecdsa/.github/workflows/continuous-integration.yml new file mode 100644 index 0000000000..640074b82e --- /dev/null +++ b/ecdsa/.github/workflows/continuous-integration.yml @@ -0,0 +1,95 @@ +name: Continuous Integration + +on: + push: + branches: [main] + pull_request: + branches: + - "**" + workflow_dispatch: + branches: + - "**" + +jobs: + test: + name: Test Suite + runs-on: ubuntu-latest + if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')" + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - name: Install nightly toolchain + id: rustc-toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + + - name: rust-cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: rustc-test-${{ steps.rustc-toolchain.outputs.rustc_hash }}-cargo-${{ hashFiles('**/Cargo.toml') }} + + - name: Run cargo test + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace + env: + RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 + RUST_LOG: 1 + CARGO_INCREMENTAL: 1 + RUST_BACKTRACE: 1 + + lints: + name: Formatting and Clippy + runs-on: ubuntu-latest + if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')" + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - name: Install nightly toolchain + id: rustc-toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + components: rustfmt, clippy + + - name: rust-cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: rustc-lints-${{ steps.rustc-toolchain.outputs.rustc_hash }}-cargo-${{ hashFiles('**/Cargo.toml') }} + + - name: Run cargo fmt + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + env: + CARGO_INCREMENTAL: 1 + + - name: Run cargo clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --all-features --all-targets -- -D warnings -A incomplete-features + env: + # Seems necessary until https://github.com/rust-lang/rust/pull/115819 is merged. 
+ CARGO_INCREMENTAL: 0 diff --git a/ecdsa/.gitignore b/ecdsa/.gitignore new file mode 100644 index 0000000000..ff355236d6 --- /dev/null +++ b/ecdsa/.gitignore @@ -0,0 +1,11 @@ +# Cargo build +/target +Cargo.lock + +# Profile-guided optimization +/tmp +pgo-data.profdata + +# MacOS nuisances +.DS_Store + diff --git a/ecdsa/Cargo.toml b/ecdsa/Cargo.toml new file mode 100644 index 0000000000..97253b7431 --- /dev/null +++ b/ecdsa/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "plonky2_ecdsa" +description = "ECDSA gadget for Plonky2" +version = "0.2.0" +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +categories.workspace = true + +[features] +parallel = ["plonky2_maybe_rayon/parallel", "plonky2/parallel"] + +[dependencies] +anyhow = { version = "1.0.40" } +itertools = { version = "0.10.0" } +plonky2_maybe_rayon = { version = "0.1.0" } +num = { version = "0.4.0" } +plonky2 = { path = "../plonky2" } +plonky2_u32 = { path = "../u32" } +serde = { version = "1.0", features = ["derive"] } + +[dev-dependencies] +rand = { version = "0.8.4", features = ["getrandom"] } +log = "0.4" +env_logger = "0.10" diff --git a/ecdsa/LICENSE-APACHE b/ecdsa/LICENSE-APACHE new file mode 100644 index 0000000000..1e5006dc14 --- /dev/null +++ b/ecdsa/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/ecdsa/LICENSE-MIT b/ecdsa/LICENSE-MIT new file mode 100644 index 0000000000..86d690b220 --- /dev/null +++ b/ecdsa/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 The Plonky2 Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/ecdsa/README.md b/ecdsa/README.md new file mode 100644 index 0000000000..a917cb0672 --- /dev/null +++ b/ecdsa/README.md @@ -0,0 +1,73 @@ +## Benchmark +Machine: Apple M1 Pro 10 cores + 32GB + +### Secp256k1 +Gate Number: 98039 +Prove Time: 9s +``` + Finished release [optimized] target(s) in 4.30s + Running `target/release/examples/ecdsa_secp256k1` +[2024-03-06T10:42:35Z DEBUG plonky2::util::context_tree] 98039 gates to root +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] Total gate counts: +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 16 instances of BaseSumGate { num_limbs: 16 } + Base: 4 +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 1264 instances of U32AddManyGate { num_addends: 13, num_ops: 4, _phantom: PhantomData } +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 3121 instances of U32RangeCheckGate { num_input_limbs: 8, _phantom: PhantomData } +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 3 instances of U32AddManyGate { num_addends: 0, num_ops: 6, _phantom: PhantomData } +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 1 instances of U32RangeCheckGate { num_input_limbs: 0, _phantom: PhantomData } +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 1012 instances of U32AddManyGate { num_addends: 5, num_ops: 5, _phantom: PhantomData } +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 1264 instances of U32AddManyGate { num_addends: 11, num_ops: 4, _phantom: PhantomData } +[2024-03-06T10:42:35Z DEBUG plonky2::plonk::circuit_builder] - 1737 instances of U32SubtractionGate { num_ops: 6, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 55530 instances of U32ArithmeticGate { num_ops: 3, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 10546 instances of U32AddManyGate { num_addends: 3, num_ops: 5, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 1907 instances of ArithmeticGate { num_ops: 20 } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 16912 instances of ComparisonGate { num_bits: 32, num_chunks: 16, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 1686 instances of U32AddManyGate { num_addends: 15, num_ops: 3, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 512 instances of RandomAccessGate { bits: 4, num_copies: 4, num_extra_constants: 2, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 1264 instances of U32AddManyGate { num_addends: 7, num_ops: 4, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] - 1264 instances of U32AddManyGate { num_addends: 9, num_ops: 4, _phantom: PhantomData } +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] Degree before blinding & padding: 105434 +[2024-03-06T10:42:36Z DEBUG plonky2::plonk::circuit_builder] Degree after blinding & padding: 131072 +[2024-03-06T10:42:43Z DEBUG plonky2::plonk::circuit_builder] Building circuit took 7.3354907s +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] 9.0794s to prove +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.5859s to 
run 339849 generators +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.0920s to compute full witness +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.0037s to compute wire polynomials +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 3.7082s to compute wires commitment +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0545s to IFFT +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.4327s to FFT + blinding +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.4273s to transpose LDEs +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 2.7703s to build Merkle tree +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.1239s to compute partial products +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.7268s to commit to partial products and Z's +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0083s to IFFT +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0734s to FFT + blinding +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0724s to transpose LDEs +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.5669s to build Merkle tree +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 2.2916s to compute quotient polys +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.0006s to split up quotient polys +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.5671s to commit to quotient polys +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0544s to FFT + blinding +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0598s to transpose LDEs +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.4480s to build Merkle tree +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.0426s to construct the opening set +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | 0.2691s to compute opening proofs +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.1275s to reduce batch of 258 polynomials +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0009s to reduce batch of 2 polynomials +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0681s to perform final FFT 1048576 +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0604s to fold codewords in the commitment phase +[2024-03-06T10:42:53Z DEBUG plonky2::util::timing] | | 0.0038s to find proof-of-work witness +``` + +## License + +Licensed under either of + +* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
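+### Reproducing the benchmark
+
+The timings above come from the `ecdsa_secp256k1` example. Assuming a checkout of this workspace, something like the following should reproduce them (the example enables `debug` logging itself; adding `--features parallel` should better match the multi-core numbers above):
+
+```
+cargo run --release -p plonky2_ecdsa --example ecdsa_secp256k1
+```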
diff --git a/ecdsa/examples/ecdsa_secp256k1.rs b/ecdsa/examples/ecdsa_secp256k1.rs new file mode 100644 index 0000000000..0769a7f458 --- /dev/null +++ b/ecdsa/examples/ecdsa_secp256k1.rs @@ -0,0 +1,65 @@ +use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV}; +use log::Level; +use plonky2::{ + field::{secp256k1_scalar::Secp256K1Scalar, types::Sample}, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CircuitData}, + config::{GenericConfig, PoseidonGoldilocksConfig}, + prover::prove, + }, + util::timing::TimingTree, +}; +use plonky2_ecdsa::{ + curve::{ + curve_types::{Curve, CurveScalar}, + ecdsa::{sign_message, ECDSAPublicKey, ECDSASecretKey, ECDSASignature}, + secp256k1::Secp256K1, + }, + gadgets::{ + curve::CircuitBuilderCurve, + ecdsa::{verify_message_circuit, ECDSAPublicKeyTarget, ECDSASignatureTarget}, + nonnative::CircuitBuilderNonNative, + }, +}; +fn main() { + let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug")); + + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + type Curve = Secp256K1; + + let pw = PartialWitness::new(); + let config = CircuitConfig::standard_ecc_config(); + let mut builder = CircuitBuilder::::new(config); + + let msg = Secp256K1Scalar::rand(); + let msg_target = builder.constant_nonnative(msg); + + let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); + let pk = ECDSAPublicKey((CurveScalar(sk.0) * Curve::GENERATOR_PROJECTIVE).to_affine()); + + let pk_target = ECDSAPublicKeyTarget(builder.constant_affine_point(pk.0)); + + let sig = sign_message(msg, sk); + + let ECDSASignature { r, s } = sig; + let r_target = builder.constant_nonnative(r); + let s_target = builder.constant_nonnative(s); + let sig_target = ECDSASignatureTarget { r: r_target, s: s_target }; + + verify_message_circuit(&mut builder, msg_target, sig_target, pk_target); + + builder.print_gate_counts(0); + let circuit = builder.build::(); + let CircuitData { prover_only, common, verifier_only: _ } = &circuit; + + let mut timing = TimingTree::new("prove", Level::Debug); + let proof = prove(prover_only, common, pw, &mut timing).expect("prover failed"); + timing.print(); + + circuit.verify(proof).expect("verifier failed"); +} diff --git a/ecdsa/rustfmt.toml b/ecdsa/rustfmt.toml new file mode 100644 index 0000000000..e651f760b1 --- /dev/null +++ b/ecdsa/rustfmt.toml @@ -0,0 +1,7 @@ +# This project uses rustfmt to format source code. Run `cargo +nightly fmt [-- --check]. +# https://github.com/rust-lang/rustfmt/blob/master/Configurations.md + +# Break complex but short statements a bit less. +use_small_heuristics = "Max" + +imports_granularity = "Crate" \ No newline at end of file diff --git a/ecdsa/src/curve/curve_adds.rs b/ecdsa/src/curve/curve_adds.rs new file mode 100644 index 0000000000..0853c4ab7f --- /dev/null +++ b/ecdsa/src/curve/curve_adds.rs @@ -0,0 +1,133 @@ +use core::ops::Add; + +use plonky2::field::{ops::Square, types::Field}; + +use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; + +impl Add> for ProjectivePoint { + type Output = ProjectivePoint; + + fn add(self, rhs: ProjectivePoint) -> Self::Output { + let ProjectivePoint { x: x1, y: y1, z: z1 } = self; + let ProjectivePoint { x: x2, y: y2, z: z2 } = rhs; + + if z1 == C::BaseField::ZERO { + return rhs; + } + if z2 == C::BaseField::ZERO { + return self; + } + + let x1z2 = x1 * z2; + let y1z2 = y1 * z2; + let x2z1 = x2 * z1; + let y2z1 = y2 * z1; + + // Check if we're doubling or adding inverses. 
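+        // Equality of affine x-coordinates (x1/z1 == x2/z2) is tested without division as x1*z2 == x2*z1.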
+ if x1z2 == x2z1 { + if y1z2 == y2z1 { + // TODO: inline to avoid redundant muls. + return self.double(); + } + if y1z2 == -y2z1 { + return ProjectivePoint::ZERO; + } + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/addition/add-1998-cmo-2 + let z1z2 = z1 * z2; + let u = y2z1 - y1z2; + let uu = u.square(); + let v = x2z1 - x1z2; + let vv = v.square(); + let vvv = v * vv; + let r = vv * x1z2; + let a = uu * z1z2 - vvv - r.double(); + let x3 = v * a; + let y3 = u * (r - a) - vvv * y1z2; + let z3 = vvv * z1z2; + ProjectivePoint::nonzero(x3, y3, z3) + } +} + +impl Add> for ProjectivePoint { + type Output = ProjectivePoint; + + fn add(self, rhs: AffinePoint) -> Self::Output { + let ProjectivePoint { x: x1, y: y1, z: z1 } = self; + let AffinePoint { x: x2, y: y2, zero: zero2 } = rhs; + + if z1 == C::BaseField::ZERO { + return rhs.to_projective(); + } + if zero2 { + return self; + } + + let x2z1 = x2 * z1; + let y2z1 = y2 * z1; + + // Check if we're doubling or adding inverses. + if x1 == x2z1 { + if y1 == y2z1 { + // TODO: inline to avoid redundant muls. + return self.double(); + } + if y1 == -y2z1 { + return ProjectivePoint::ZERO; + } + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/addition/madd-1998-cmo + let u = y2z1 - y1; + let uu = u.square(); + let v = x2z1 - x1; + let vv = v.square(); + let vvv = v * vv; + let r = vv * x1; + let a = uu * z1 - vvv - r.double(); + let x3 = v * a; + let y3 = u * (r - a) - vvv * y1; + let z3 = vvv * z1; + ProjectivePoint::nonzero(x3, y3, z3) + } +} + +impl Add> for AffinePoint { + type Output = ProjectivePoint; + + fn add(self, rhs: AffinePoint) -> Self::Output { + let AffinePoint { x: x1, y: y1, zero: zero1 } = self; + let AffinePoint { x: x2, y: y2, zero: zero2 } = rhs; + + if zero1 { + return rhs.to_projective(); + } + if zero2 { + return self.to_projective(); + } + + // Check if we're doubling or adding inverses. + if x1 == x2 { + if y1 == y2 { + return self.to_projective().double(); + } + if y1 == -y2 { + return ProjectivePoint::ZERO; + } + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/addition/mmadd-1998-cmo + let u = y2 - y1; + let uu = u.square(); + let v = x2 - x1; + let vv = v.square(); + let vvv = v * vv; + let r = vv * x1; + let a = uu - vvv - r.double(); + let x3 = v * a; + let y3 = u * (r - a) - vvv * y1; + let z3 = vvv; + ProjectivePoint::nonzero(x3, y3, z3) + } +} diff --git a/ecdsa/src/curve/curve_msm.rs b/ecdsa/src/curve/curve_msm.rs new file mode 100644 index 0000000000..ed272572c3 --- /dev/null +++ b/ecdsa/src/curve/curve_msm.rs @@ -0,0 +1,267 @@ +use alloc::vec::Vec; + +use itertools::Itertools; +use plonky2::field::types::{Field, PrimeField}; +use plonky2_maybe_rayon::*; + +use crate::curve::{ + curve_summation::affine_multisummation_best, + curve_types::{AffinePoint, Curve, ProjectivePoint}, +}; + +/// In Yao's method, we compute an affine summation for each digit. In a parallel setting, it would +/// be easiest to assign individual summations to threads, but this would be sub-optimal because +/// multi-summations can be more efficient than repeating individual summations (see +/// `affine_multisummation_best`). Thus we divide digits into large chunks, and assign chunks of +/// digits to threads. Note that there is a delicate balance here, as large chunks can result in +/// uneven distributions of work among threads. 
+const DIGITS_PER_CHUNK: usize = 80; + +#[derive(Clone, Debug)] +pub struct MsmPrecomputation { + /// For each generator (in the order they were passed to `msm_precompute`), contains a vector + /// of powers, i.e. [(2^w)^i] for i < DIGITS. + // TODO: Use compressed coordinates here. + powers_per_generator: Vec>>, + + /// The window size. + w: usize, +} + +pub fn msm_precompute( + generators: &[ProjectivePoint], + w: usize, +) -> MsmPrecomputation { + MsmPrecomputation { + powers_per_generator: generators + .into_par_iter() + .map(|&g| precompute_single_generator(g, w)) + .collect(), + w, + } +} + +fn precompute_single_generator(g: ProjectivePoint, w: usize) -> Vec> { + let digits = (C::ScalarField::BITS + w - 1) / w; + let mut powers: Vec> = Vec::with_capacity(digits); + powers.push(g); + for i in 1..digits { + let mut power_i_proj = powers[i - 1]; + for _j in 0..w { + power_i_proj = power_i_proj.double(); + } + powers.push(power_i_proj); + } + ProjectivePoint::batch_to_affine(&powers) +} + +pub fn msm_parallel( + scalars: &[C::ScalarField], + generators: &[ProjectivePoint], + w: usize, +) -> ProjectivePoint { + let precomputation = msm_precompute(generators, w); + msm_execute_parallel(&precomputation, scalars) +} + +pub fn msm_execute( + precomputation: &MsmPrecomputation, + scalars: &[C::ScalarField], +) -> ProjectivePoint { + assert_eq!(precomputation.powers_per_generator.len(), scalars.len()); + let w = precomputation.w; + let digits = (C::ScalarField::BITS + w - 1) / w; + let base = 1 << w; + + // This is a variant of Yao's method, adapted to the multi-scalar setting. Because we use + // extremely large windows, the repeated scans in Yao's method could be more expensive than the + // actual group operations. To avoid this, we store a multimap from each possible digit to the + // positions in which that digit occurs in the scalars. These positions have the form (i, j), + // where i is the index of the generator and j is an index into the digits of the scalar + // associated with that generator. + let mut digit_occurrences: Vec> = Vec::with_capacity(digits); + for _i in 0..base { + digit_occurrences.push(Vec::new()); + } + for (i, scalar) in scalars.iter().enumerate() { + let digits = to_digits::(scalar, w); + for (j, &digit) in digits.iter().enumerate() { + digit_occurrences[digit].push((i, j)); + } + } + + let mut y = ProjectivePoint::ZERO; + let mut u = ProjectivePoint::ZERO; + + for digit in (1..base).rev() { + for &(i, j) in &digit_occurrences[digit] { + u = u + precomputation.powers_per_generator[i][j]; + } + y = y + u; + } + + y +} + +pub fn msm_execute_parallel( + precomputation: &MsmPrecomputation, + scalars: &[C::ScalarField], +) -> ProjectivePoint { + assert_eq!(precomputation.powers_per_generator.len(), scalars.len()); + let w = precomputation.w; + let digits = (C::ScalarField::BITS + w - 1) / w; + let base = 1 << w; + + // This is a variant of Yao's method, adapted to the multi-scalar setting. Because we use + // extremely large windows, the repeated scans in Yao's method could be more expensive than the + // actual group operations. To avoid this, we store a multimap from each possible digit to the + // positions in which that digit occurs in the scalars. These positions have the form (i, j), + // where i is the index of the generator and j is an index into the digits of the scalar + // associated with that generator. 
+ let mut digit_occurrences: Vec> = Vec::with_capacity(digits); + for _i in 0..base { + digit_occurrences.push(Vec::new()); + } + for (i, scalar) in scalars.iter().enumerate() { + let digits = to_digits::(scalar, w); + for (j, &digit) in digits.iter().enumerate() { + digit_occurrences[digit].push((i, j)); + } + } + + // For each digit, we add up the powers associated with all occurrences that digit. + let digits: Vec = (0..base).collect(); + let digit_acc: Vec> = digits + .par_chunks(DIGITS_PER_CHUNK) + .flat_map(|chunk| { + let summations: Vec>> = chunk + .iter() + .map(|&digit| { + digit_occurrences[digit] + .iter() + .map(|&(i, j)| precomputation.powers_per_generator[i][j]) + .collect() + }) + .collect(); + affine_multisummation_best(summations) + }) + .collect(); + // println!("Computing the per-digit summations (in parallel) took {}s", start.elapsed().as_secs_f64()); + + let mut y = ProjectivePoint::ZERO; + let mut u = ProjectivePoint::ZERO; + for digit in (1..base).rev() { + u = u + digit_acc[digit]; + y = y + u; + } + // println!("Final summation (sequential) {}s", start.elapsed().as_secs_f64()); + y +} + +pub(crate) fn to_digits(x: &C::ScalarField, w: usize) -> Vec { + let scalar_bits = C::ScalarField::BITS; + let num_digits = (scalar_bits + w - 1) / w; + + // Convert x to a bool array. + let x_canonical: Vec<_> = x + .to_canonical_biguint() + .to_u64_digits() + .iter() + .cloned() + .pad_using(scalar_bits / 64, |_| 0) + .collect(); + let mut x_bits = Vec::with_capacity(scalar_bits); + for i in 0..scalar_bits { + x_bits.push((x_canonical[i / 64] >> (i as u64 % 64) & 1) != 0); + } + + let mut digits = Vec::with_capacity(num_digits); + for i in 0..num_digits { + let mut digit = 0; + for j in ((i * w)..((i + 1) * w).min(scalar_bits)).rev() { + digit <<= 1; + digit |= x_bits[j] as usize; + } + digits.push(digit); + } + digits +} + +#[cfg(test)] +mod tests { + use alloc::vec; + + use num::BigUint; + use plonky2::field::secp256k1_scalar::Secp256K1Scalar; + + use super::*; + use crate::curve::secp256k1::Secp256K1; + + #[test] + fn test_to_digits() { + let x_canonical = [ + 0b10101010101010101010101010101010, + 0b10101010101010101010101010101010, + 0b11001100110011001100110011001100, + 0b11001100110011001100110011001100, + 0b11110000111100001111000011110000, + 0b11110000111100001111000011110000, + 0b00001111111111111111111111111111, + 0b11111111111111111111111111111111, + ]; + let x = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&x_canonical)); + assert_eq!(x.to_canonical_biguint().to_u32_digits(), x_canonical); + assert_eq!( + to_digits::(&x, 17), + vec![ + 0b01010101010101010, + 0b10101010101010101, + 0b01010101010101010, + 0b11001010101010101, + 0b01100110011001100, + 0b00110011001100110, + 0b10011001100110011, + 0b11110000110011001, + 0b01111000011110000, + 0b00111100001111000, + 0b00011110000111100, + 0b11111111111111110, + 0b01111111111111111, + 0b11111111111111000, + 0b11111111111111111, + 0b1, + ] + ); + } + + #[test] + fn test_msm() { + let w = 5; + + let generator_1 = Secp256K1::GENERATOR_PROJECTIVE; + let generator_2 = generator_1 + generator_1; + let generator_3 = generator_1 + generator_2; + + let scalar_1 = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[ + 11111111, 22222222, 33333333, 44444444, + ])); + let scalar_2 = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[ + 22222222, 22222222, 33333333, 44444444, + ])); + let scalar_3 = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[ + 33333333, 22222222, 
33333333, 44444444, + ])); + + let generators = vec![generator_1, generator_2, generator_3]; + let scalars = vec![scalar_1, scalar_2, scalar_3]; + + let precomputation = msm_precompute(&generators, w); + let result_msm = msm_execute(&precomputation, &scalars); + + let result_naive = Secp256K1::convert(scalar_1) * generator_1 + + Secp256K1::convert(scalar_2) * generator_2 + + Secp256K1::convert(scalar_3) * generator_3; + + assert_eq!(result_msm, result_naive); + } +} diff --git a/ecdsa/src/curve/curve_multiplication.rs b/ecdsa/src/curve/curve_multiplication.rs new file mode 100644 index 0000000000..1f9c653d3e --- /dev/null +++ b/ecdsa/src/curve/curve_multiplication.rs @@ -0,0 +1,100 @@ +use alloc::vec::Vec; +use core::ops::Mul; + +use plonky2::field::types::{Field, PrimeField}; + +use crate::curve::curve_types::{Curve, CurveScalar, ProjectivePoint}; + +const WINDOW_BITS: usize = 4; +const BASE: usize = 1 << WINDOW_BITS; + +fn digits_per_scalar() -> usize { + (C::ScalarField::BITS + WINDOW_BITS - 1) / WINDOW_BITS +} + +/// Precomputed state used for scalar x ProjectivePoint multiplications, +/// specific to a particular generator. +#[derive(Clone)] +pub struct MultiplicationPrecomputation { + /// [(2^w)^i] g for each i < digits_per_scalar. + powers: Vec>, +} + +impl ProjectivePoint { + pub fn mul_precompute(&self) -> MultiplicationPrecomputation { + let num_digits = digits_per_scalar::(); + let mut powers = Vec::with_capacity(num_digits); + powers.push(*self); + for i in 1..num_digits { + let mut power_i = powers[i - 1]; + for _j in 0..WINDOW_BITS { + power_i = power_i.double(); + } + powers.push(power_i); + } + + MultiplicationPrecomputation { powers } + } + + #[must_use] + pub fn mul_with_precomputation( + &self, + scalar: C::ScalarField, + precomputation: MultiplicationPrecomputation, + ) -> Self { + // Yao's method; see https://koclab.cs.ucsb.edu/teaching/ecc/eccPapers/Doche-ch09.pdf + let precomputed_powers = precomputation.powers; + + let digits = to_digits::(&scalar); + + let mut y = ProjectivePoint::ZERO; + let mut u = ProjectivePoint::ZERO; + let mut all_summands = Vec::new(); + for j in (1..BASE).rev() { + let mut u_summands = Vec::new(); + for (i, &digit) in digits.iter().enumerate() { + if digit == j as u64 { + u_summands.push(precomputed_powers[i]); + } + } + all_summands.push(u_summands); + } + + let all_sums: Vec> = all_summands + .iter() + .cloned() + .map(|vec| vec.iter().fold(ProjectivePoint::ZERO, |a, &b| a + b)) + .collect(); + for i in 0..all_sums.len() { + u = u + all_sums[i]; + y = y + u; + } + y + } +} + +impl Mul> for CurveScalar { + type Output = ProjectivePoint; + + fn mul(self, rhs: ProjectivePoint) -> Self::Output { + let precomputation = rhs.mul_precompute(); + rhs.mul_with_precomputation(self.0, precomputation) + } +} + +#[allow(clippy::assertions_on_constants)] +fn to_digits(x: &C::ScalarField) -> Vec { + debug_assert!( + 64 % WINDOW_BITS == 0, + "For simplicity, only power-of-two window sizes are handled for now" + ); + let digits_per_u64 = 64 / WINDOW_BITS; + let mut digits = Vec::with_capacity(digits_per_scalar::()); + for limb in x.to_canonical_biguint().to_u64_digits() { + for j in 0..digits_per_u64 { + digits.push((limb >> (j * WINDOW_BITS) as u64) % BASE as u64); + } + } + + digits +} diff --git a/ecdsa/src/curve/curve_summation.rs b/ecdsa/src/curve/curve_summation.rs new file mode 100644 index 0000000000..7f031de52a --- /dev/null +++ b/ecdsa/src/curve/curve_summation.rs @@ -0,0 +1,200 @@ +use alloc::{vec, vec::Vec}; +use core::iter::Sum; + +use 
plonky2::field::{ops::Square, types::Field}; + +use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; + +impl Sum> for ProjectivePoint { + fn sum>>(iter: I) -> ProjectivePoint { + let points: Vec<_> = iter.collect(); + affine_summation_best(points) + } +} + +impl Sum for ProjectivePoint { + fn sum>>(iter: I) -> ProjectivePoint { + iter.fold(ProjectivePoint::ZERO, |acc, x| acc + x) + } +} + +pub fn affine_summation_best(summation: Vec>) -> ProjectivePoint { + let result = affine_multisummation_best(vec![summation]); + debug_assert_eq!(result.len(), 1); + result[0] +} + +pub fn affine_multisummation_best( + summations: Vec>>, +) -> Vec> { + let pairwise_sums: usize = summations.iter().map(|summation| summation.len() / 2).sum(); + + // This threshold is chosen based on data from the summation benchmarks. + if pairwise_sums < 70 { + affine_multisummation_pairwise(summations) + } else { + affine_multisummation_batch_inversion(summations) + } +} + +/// Adds each pair of points using an affine + affine = projective formula, then adds up the +/// intermediate sums using a projective formula. +pub fn affine_multisummation_pairwise( + summations: Vec>>, +) -> Vec> { + summations.into_iter().map(affine_summation_pairwise).collect() +} + +/// Adds each pair of points using an affine + affine = projective formula, then adds up the +/// intermediate sums using a projective formula. +pub fn affine_summation_pairwise(points: Vec>) -> ProjectivePoint { + let mut reduced_points: Vec> = Vec::new(); + for chunk in points.chunks(2) { + match chunk.len() { + 1 => reduced_points.push(chunk[0].to_projective()), + 2 => reduced_points.push(chunk[0] + chunk[1]), + _ => panic!(), + } + } + // TODO: Avoid copying (deref) + reduced_points.iter().fold(ProjectivePoint::ZERO, |sum, x| sum + *x) +} + +/// Computes several summations of affine points by applying an affine group law, except that the +/// divisions are batched via Montgomery's trick. +pub fn affine_summation_batch_inversion( + summation: Vec>, +) -> ProjectivePoint { + let result = affine_multisummation_batch_inversion(vec![summation]); + debug_assert_eq!(result.len(), 1); + result[0] +} + +/// Computes several summations of affine points by applying an affine group law, except that the +/// divisions are batched via Montgomery's trick. +pub fn affine_multisummation_batch_inversion( + summations: Vec>>, +) -> Vec> { + let mut elements_to_invert = Vec::new(); + + // For each pair of points, (x1, y1) and (x2, y2), that we're going to add later, we want to + // invert either y (if the points are equal) or x1 - x2 (otherwise). We will use these later. + for summation in &summations { + let n = summation.len(); + // The special case for n=0 is to avoid underflow. + let range_end = if n == 0 { 0 } else { n - 1 }; + + for i in (0..range_end).step_by(2) { + let p1 = summation[i]; + let p2 = summation[i + 1]; + let AffinePoint { x: x1, y: y1, zero: zero1 } = p1; + let AffinePoint { x: x2, y: _y2, zero: zero2 } = p2; + + if zero1 || zero2 || p1 == -p2 { + // These are trivial cases where we won't need any inverse. 
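+                // (a zero operand or P + (-P) is resolved in the second pass without a field division)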
+ } else if p1 == p2 { + elements_to_invert.push(y1.double()); + } else { + elements_to_invert.push(x1 - x2); + } + } + } + + let inverses: Vec = + C::BaseField::batch_multiplicative_inverse(&elements_to_invert); + + let mut all_reduced_points = Vec::with_capacity(summations.len()); + let mut inverse_index = 0; + for summation in summations { + let n = summation.len(); + let mut reduced_points = Vec::with_capacity((n + 1) / 2); + + // The special case for n=0 is to avoid underflow. + let range_end = if n == 0 { 0 } else { n - 1 }; + + for i in (0..range_end).step_by(2) { + let p1 = summation[i]; + let p2 = summation[i + 1]; + let AffinePoint { x: x1, y: y1, zero: zero1 } = p1; + let AffinePoint { x: x2, y: y2, zero: zero2 } = p2; + + let sum = if zero1 { + p2 + } else if zero2 { + p1 + } else if p1 == -p2 { + AffinePoint::ZERO + } else { + // It's a non-trivial case where we need one of the inverses we computed earlier. + let inverse = inverses[inverse_index]; + inverse_index += 1; + + if p1 == p2 { + // This is the doubling case. + let mut numerator = x1.square().triple(); + if C::A.is_nonzero() { + numerator += C::A; + } + let quotient = numerator * inverse; + let x3 = quotient.square() - x1.double(); + let y3 = quotient * (x1 - x3) - y1; + AffinePoint::nonzero(x3, y3) + } else { + // This is the general case. We use the incomplete addition formulas 4.3 and 4.4. + let quotient = (y1 - y2) * inverse; + let x3 = quotient.square() - x1 - x2; + let y3 = quotient * (x1 - x3) - y1; + AffinePoint::nonzero(x3, y3) + } + }; + reduced_points.push(sum); + } + + // If n is odd, the last point was not part of a pair. + if n % 2 == 1 { + reduced_points.push(summation[n - 1]); + } + + all_reduced_points.push(reduced_points); + } + + // We should have consumed all of the inverses from the batch computation. + debug_assert_eq!(inverse_index, inverses.len()); + + // Recurse with our smaller set of points. 
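+    // Each pass roughly halves every summation, so the recursion bottoms out in the pairwise path of `affine_multisummation_best`.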
+ affine_multisummation_best(all_reduced_points) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::curve::secp256k1::Secp256K1; + + #[test] + fn test_pairwise_affine_summation() { + let g_affine = Secp256K1::GENERATOR_AFFINE; + let g2_affine = (g_affine + g_affine).to_affine(); + let g3_affine = (g_affine + g_affine + g_affine).to_affine(); + let g2_proj = g2_affine.to_projective(); + let g3_proj = g3_affine.to_projective(); + assert_eq!(affine_summation_pairwise::(vec![g_affine, g_affine]), g2_proj); + assert_eq!(affine_summation_pairwise::(vec![g_affine, g2_affine]), g3_proj); + assert_eq!( + affine_summation_pairwise::(vec![g_affine, g_affine, g_affine]), + g3_proj + ); + assert_eq!(affine_summation_pairwise::(vec![]), ProjectivePoint::ZERO); + } + + #[test] + fn test_pairwise_affine_summation_batch_inversion() { + let g = Secp256K1::GENERATOR_AFFINE; + let g_proj = g.to_projective(); + assert_eq!(affine_summation_batch_inversion::(vec![g, g]), g_proj + g_proj); + assert_eq!( + affine_summation_batch_inversion::(vec![g, g, g]), + g_proj + g_proj + g_proj + ); + assert_eq!(affine_summation_batch_inversion::(vec![]), ProjectivePoint::ZERO); + } +} diff --git a/ecdsa/src/curve/curve_types.rs b/ecdsa/src/curve/curve_types.rs new file mode 100644 index 0000000000..fc0ac6e37b --- /dev/null +++ b/ecdsa/src/curve/curve_types.rs @@ -0,0 +1,248 @@ +use alloc::vec::Vec; +use core::{ + fmt::Debug, + hash::{Hash, Hasher}, + ops::Neg, +}; + +use plonky2::field::{ + ops::Square, + types::{Field, PrimeField}, +}; +use serde::{Deserialize, Serialize}; + +// To avoid implementation conflicts from associated types, +// see https://github.com/rust-lang/rust/issues/20400 +pub struct CurveScalar(pub ::ScalarField); + +/// A short Weierstrass curve. +pub trait Curve: 'static + Sync + Sized + Copy + Debug { + type BaseField: PrimeField; + type ScalarField: PrimeField; + + const A: Self::BaseField; + const B: Self::BaseField; + + const GENERATOR_AFFINE: AffinePoint; + + const GENERATOR_PROJECTIVE: ProjectivePoint = ProjectivePoint { + x: Self::GENERATOR_AFFINE.x, + y: Self::GENERATOR_AFFINE.y, + z: Self::BaseField::ONE, + }; + + fn convert(x: Self::ScalarField) -> CurveScalar { + CurveScalar(x) + } + + fn is_safe_curve() -> bool { + // Added additional check to prevent using vulnerabilties in case a discriminant is equal to 0. + (Self::A.cube().double().double() + Self::B.square().triple().triple().triple()) + .is_nonzero() + } +} + +/// A point on a short Weierstrass curve, represented in affine coordinates. 
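+/// The point at infinity is encoded by the `zero` flag rather than by a special coordinate value.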
+#[derive(Copy, Clone, Debug, Deserialize, Serialize)] +pub struct AffinePoint { + pub x: C::BaseField, + pub y: C::BaseField, + pub zero: bool, +} + +impl AffinePoint { + pub const ZERO: Self = Self { x: C::BaseField::ZERO, y: C::BaseField::ZERO, zero: true }; + + pub fn nonzero(x: C::BaseField, y: C::BaseField) -> Self { + let point = Self { x, y, zero: false }; + debug_assert!(point.is_valid()); + point + } + + pub fn is_valid(&self) -> bool { + let Self { x, y, zero } = *self; + zero || y.square() == x.cube() + C::A * x + C::B + } + + pub fn to_projective(&self) -> ProjectivePoint { + let Self { x, y, zero } = *self; + let z = if zero { C::BaseField::ZERO } else { C::BaseField::ONE }; + + ProjectivePoint { x, y, z } + } + + pub fn batch_to_projective(affine_points: &[Self]) -> Vec> { + affine_points.iter().map(Self::to_projective).collect() + } + + #[must_use] + pub fn double(&self) -> Self { + let AffinePoint { x: x1, y: y1, zero } = *self; + + if zero { + return AffinePoint::ZERO; + } + + let double_y = y1.double(); + let inv_double_y = double_y.inverse(); // (2y)^(-1) + let triple_xx = x1.square().triple(); // 3x^2 + let lambda = (triple_xx + C::A) * inv_double_y; + let x3 = lambda.square() - self.x.double(); + let y3 = lambda * (x1 - x3) - y1; + + Self { x: x3, y: y3, zero: false } + } +} + +impl PartialEq for AffinePoint { + fn eq(&self, other: &Self) -> bool { + let AffinePoint { x: x1, y: y1, zero: zero1 } = *self; + let AffinePoint { x: x2, y: y2, zero: zero2 } = *other; + if zero1 || zero2 { + return zero1 == zero2; + } + x1 == x2 && y1 == y2 + } +} + +impl Eq for AffinePoint {} + +impl Hash for AffinePoint { + fn hash(&self, state: &mut H) { + if self.zero { + self.zero.hash(state); + } else { + self.x.hash(state); + self.y.hash(state); + } + } +} + +/// A point on a short Weierstrass curve, represented in projective coordinates. 
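+/// Any point with `z == 0` represents the point at infinity; the canonical zero is `(0 : 1 : 0)`.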
+#[derive(Copy, Clone, Debug)] +pub struct ProjectivePoint { + pub x: C::BaseField, + pub y: C::BaseField, + pub z: C::BaseField, +} + +impl ProjectivePoint { + pub const ZERO: Self = + Self { x: C::BaseField::ZERO, y: C::BaseField::ONE, z: C::BaseField::ZERO }; + + pub fn nonzero(x: C::BaseField, y: C::BaseField, z: C::BaseField) -> Self { + let point = Self { x, y, z }; + debug_assert!(point.is_valid()); + point + } + + pub fn is_valid(&self) -> bool { + let Self { x, y, z } = *self; + z.is_zero() || y.square() * z == x.cube() + C::A * x * z.square() + C::B * z.cube() + } + + pub fn to_affine(&self) -> AffinePoint { + let Self { x, y, z } = *self; + if z == C::BaseField::ZERO { + AffinePoint::ZERO + } else { + let z_inv = z.inverse(); + AffinePoint::nonzero(x * z_inv, y * z_inv) + } + } + + pub fn batch_to_affine(proj_points: &[Self]) -> Vec> { + let n = proj_points.len(); + let zs: Vec = proj_points.iter().map(|pp| pp.z).collect(); + let z_invs = C::BaseField::batch_multiplicative_inverse(&zs); + + let mut result = Vec::with_capacity(n); + for i in 0..n { + let Self { x, y, z } = proj_points[i]; + result.push(if z == C::BaseField::ZERO { + AffinePoint::ZERO + } else { + let z_inv = z_invs[i]; + AffinePoint::nonzero(x * z_inv, y * z_inv) + }); + } + result + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/doubling/dbl-2007-bl + #[must_use] + pub fn double(&self) -> Self { + let Self { x, y, z } = *self; + if z == C::BaseField::ZERO { + return ProjectivePoint::ZERO; + } + + let xx = x.square(); + let zz = z.square(); + let mut w = xx.triple(); + if C::A.is_nonzero() { + w += C::A * zz; + } + let s = y.double() * z; + let r = y * s; + let rr = r.square(); + let b = (x + r).square() - (xx + rr); + let h = w.square() - b.double(); + let x3 = h * s; + let y3 = w * (b - h) - rr.double(); + let z3 = s.cube(); + Self { x: x3, y: y3, z: z3 } + } + + pub fn add_slices(a: &[Self], b: &[Self]) -> Vec { + assert_eq!(a.len(), b.len()); + a.iter().zip(b.iter()).map(|(&a_i, &b_i)| a_i + b_i).collect() + } + + #[must_use] + pub fn neg(&self) -> Self { + Self { x: self.x, y: -self.y, z: self.z } + } +} + +impl PartialEq for ProjectivePoint { + fn eq(&self, other: &Self) -> bool { + let ProjectivePoint { x: x1, y: y1, z: z1 } = *self; + let ProjectivePoint { x: x2, y: y2, z: z2 } = *other; + if z1 == C::BaseField::ZERO || z2 == C::BaseField::ZERO { + return z1 == z2; + } + + // We want to compare (x1/z1, y1/z1) == (x2/z2, y2/z2). + // But to avoid field division, it is better to compare (x1*z2, y1*z2) == (x2*z1, y2*z1). 
+ x1 * z2 == x2 * z1 && y1 * z2 == y2 * z1 + } +} + +impl Eq for ProjectivePoint {} + +impl Neg for AffinePoint { + type Output = AffinePoint; + + fn neg(self) -> Self::Output { + let AffinePoint { x, y, zero } = self; + AffinePoint { x, y: -y, zero } + } +} + +impl Neg for ProjectivePoint { + type Output = ProjectivePoint; + + fn neg(self) -> Self::Output { + let ProjectivePoint { x, y, z } = self; + ProjectivePoint { x, y: -y, z } + } +} + +pub fn base_to_scalar(x: C::BaseField) -> C::ScalarField { + C::ScalarField::from_noncanonical_biguint(x.to_canonical_biguint()) +} + +pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { + C::BaseField::from_noncanonical_biguint(x.to_canonical_biguint()) +} diff --git a/ecdsa/src/curve/ecdsa.rs b/ecdsa/src/curve/ecdsa.rs new file mode 100644 index 0000000000..67b9806618 --- /dev/null +++ b/ecdsa/src/curve/ecdsa.rs @@ -0,0 +1,87 @@ +use plonky2::field::types::{Field, Sample}; +use serde::{Deserialize, Serialize}; + +use crate::curve::{ + curve_msm::msm_parallel, + curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}, +}; + +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct ECDSASignature { + pub r: C::ScalarField, + pub s: C::ScalarField, +} + +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct ECDSASecretKey(pub C::ScalarField); + +impl ECDSASecretKey { + pub fn to_public(&self) -> ECDSAPublicKey { + ECDSAPublicKey((CurveScalar(self.0) * C::GENERATOR_PROJECTIVE).to_affine()) + } +} + +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct ECDSAPublicKey(pub AffinePoint); + +pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { + let (k, rr) = { + let mut k = C::ScalarField::rand(); + let mut rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); + while rr.x == C::BaseField::ZERO { + k = C::ScalarField::rand(); + rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); + } + (k, rr) + }; + let r = base_to_scalar::(rr.x); + + let s = k.inverse() * (msg + r * sk.0); + + ECDSASignature { r, s } +} + +pub fn verify_message( + msg: C::ScalarField, + sig: ECDSASignature, + pk: ECDSAPublicKey, +) -> bool { + let ECDSASignature { r, s } = sig; + + assert!(pk.0.is_valid()); + + let c = s.inverse(); + let u1 = msg * c; + let u2 = r * c; + + let g = C::GENERATOR_PROJECTIVE; + let w = 5; // Experimentally fastest + let point_proj = msm_parallel(&[u1, u2], &[g, pk.0.to_projective()], w); + let point = point_proj.to_affine(); + + let x = base_to_scalar::(point.x); + r == x +} + +#[cfg(test)] +mod tests { + use plonky2::field::{secp256k1_scalar::Secp256K1Scalar, types::Sample}; + + use crate::curve::{ + ecdsa::{sign_message, verify_message, ECDSASecretKey}, + secp256k1::Secp256K1, + }; + + #[test] + fn test_ecdsa_native() { + type C = Secp256K1; + + let msg = Secp256K1Scalar::rand(); + let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); + let pk = sk.to_public(); + + let sig = sign_message(msg, sk); + let result = verify_message(msg, sig, pk); + assert!(result); + } +} diff --git a/ecdsa/src/curve/glv.rs b/ecdsa/src/curve/glv.rs new file mode 100644 index 0000000000..177d28e671 --- /dev/null +++ b/ecdsa/src/curve/glv.rs @@ -0,0 +1,135 @@ +use num::{rational::Ratio, BigUint}; +use plonky2::field::{ + secp256k1_base::Secp256K1Base, + secp256k1_scalar::Secp256K1Scalar, + types::{Field, PrimeField}, +}; + +use crate::curve::{ + curve_msm::msm_parallel, + curve_types::{AffinePoint, ProjectivePoint}, + 
secp256k1::Secp256K1, +}; + +pub const GLV_BETA: Secp256K1Base = Secp256K1Base([ + 13923278643952681454, + 11308619431505398165, + 7954561588662645993, + 8856726876819556112, +]); + +pub const GLV_S: Secp256K1Scalar = Secp256K1Scalar([ + 16069571880186789234, + 1310022930574435960, + 11900229862571533402, + 6008836872998760672, +]); + +const A1: Secp256K1Scalar = Secp256K1Scalar([16747920425669159701, 3496713202691238861, 0, 0]); + +const MINUS_B1: Secp256K1Scalar = + Secp256K1Scalar([8022177200260244675, 16448129721693014056, 0, 0]); + +const A2: Secp256K1Scalar = Secp256K1Scalar([6323353552219852760, 1498098850674701302, 1, 0]); + +const B2: Secp256K1Scalar = Secp256K1Scalar([16747920425669159701, 3496713202691238861, 0, 0]); + +/// Algorithm 15.41 in Handbook of Elliptic and Hyperelliptic Curve Cryptography. +/// Decompose a scalar `k` into two small scalars `k1, k2` with `|k1|, |k2| < √p` that satisfy +/// `k1 + s * k2 = k`. +/// Returns `(|k1|, |k2|, k1 < 0, k2 < 0)`. +pub fn decompose_secp256k1_scalar( + k: Secp256K1Scalar, +) -> (Secp256K1Scalar, Secp256K1Scalar, bool, bool) { + let p = Secp256K1Scalar::order(); + let c1_biguint = Ratio::new(B2.to_canonical_biguint() * k.to_canonical_biguint(), p.clone()) + .round() + .to_integer(); + let c1 = Secp256K1Scalar::from_noncanonical_biguint(c1_biguint); + let c2_biguint = + Ratio::new(MINUS_B1.to_canonical_biguint() * k.to_canonical_biguint(), p.clone()) + .round() + .to_integer(); + let c2 = Secp256K1Scalar::from_noncanonical_biguint(c2_biguint); + + let k1_raw = k - c1 * A1 - c2 * A2; + let k2_raw = c1 * MINUS_B1 - c2 * B2; + debug_assert!(k1_raw + GLV_S * k2_raw == k); + + let two = BigUint::from_slice(&[2]); + let k1_neg = k1_raw.to_canonical_biguint() > p.clone() / two.clone(); + let k1 = if k1_neg { + Secp256K1Scalar::from_noncanonical_biguint(p.clone() - k1_raw.to_canonical_biguint()) + } else { + k1_raw + }; + let k2_neg = k2_raw.to_canonical_biguint() > p.clone() / two; + let k2 = if k2_neg { + Secp256K1Scalar::from_noncanonical_biguint(p - k2_raw.to_canonical_biguint()) + } else { + k2_raw + }; + + (k1, k2, k1_neg, k2_neg) +} + +/// See Section 15.2.1 in Handbook of Elliptic and Hyperelliptic Curve Cryptography. +/// GLV scalar multiplication `k * P = k1 * P + k2 * psi(P)`, where `k = k1 + s * k2` is the +/// decomposition computed in `decompose_secp256k1_scalar(k)` and `psi` is the Secp256k1 +/// endomorphism `psi: (x, y) |-> (beta * x, y)` equivalent to scalar multiplication by `s`. 
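+/// The two half-width multiplications are then evaluated together as a single two-point MSM.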
+pub fn glv_mul(p: ProjectivePoint, k: Secp256K1Scalar) -> ProjectivePoint { + let (k1, k2, k1_neg, k2_neg) = decompose_secp256k1_scalar(k); + + let p_affine = p.to_affine(); + let sp = + AffinePoint:: { x: p_affine.x * GLV_BETA, y: p_affine.y, zero: p_affine.zero }; + + let first = if k1_neg { p.neg() } else { p }; + let second = if k2_neg { sp.to_projective().neg() } else { sp.to_projective() }; + + msm_parallel(&[k1, k2], &[first, second], 5) +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::field::{ + secp256k1_scalar::Secp256K1Scalar, + types::{Field, Sample}, + }; + + use crate::curve::{ + curve_types::{Curve, CurveScalar}, + glv::{decompose_secp256k1_scalar, glv_mul, GLV_S}, + secp256k1::Secp256K1, + }; + + #[test] + fn test_glv_decompose() -> Result<()> { + let k = Secp256K1Scalar::rand(); + let (k1, k2, k1_neg, k2_neg) = decompose_secp256k1_scalar(k); + let one = Secp256K1Scalar::ONE; + let m1 = if k1_neg { -one } else { one }; + let m2 = if k2_neg { -one } else { one }; + + assert!(k1 * m1 + GLV_S * k2 * m2 == k); + + Ok(()) + } + + #[test] + fn test_glv_mul() -> Result<()> { + for _ in 0..20 { + let k = Secp256K1Scalar::rand(); + + let p = CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE; + + let kp = CurveScalar(k) * p; + let glv = glv_mul(p, k); + + assert!(kp == glv); + } + + Ok(()) + } +} diff --git a/ecdsa/src/curve/mod.rs b/ecdsa/src/curve/mod.rs new file mode 100644 index 0000000000..1984b0c6bf --- /dev/null +++ b/ecdsa/src/curve/mod.rs @@ -0,0 +1,8 @@ +pub mod curve_adds; +pub mod curve_msm; +pub mod curve_multiplication; +pub mod curve_summation; +pub mod curve_types; +pub mod ecdsa; +pub mod glv; +pub mod secp256k1; diff --git a/ecdsa/src/curve/secp256k1.rs b/ecdsa/src/curve/secp256k1.rs new file mode 100644 index 0000000000..b3990c7196 --- /dev/null +++ b/ecdsa/src/curve/secp256k1.rs @@ -0,0 +1,89 @@ +use plonky2::field::{ + secp256k1_base::Secp256K1Base, secp256k1_scalar::Secp256K1Scalar, types::Field, +}; +use serde::{Deserialize, Serialize}; + +use crate::curve::curve_types::{AffinePoint, Curve}; + +#[derive(Debug, Copy, Clone, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct Secp256K1; + +impl Curve for Secp256K1 { + type BaseField = Secp256K1Base; + type ScalarField = Secp256K1Scalar; + + const A: Secp256K1Base = Secp256K1Base::ZERO; + const B: Secp256K1Base = Secp256K1Base([7, 0, 0, 0]); + const GENERATOR_AFFINE: AffinePoint = + AffinePoint { x: SECP256K1_GENERATOR_X, y: SECP256K1_GENERATOR_Y, zero: false }; +} + +// 55066263022277343669578718895168534326250603453777594175500187360389116729240 +const SECP256K1_GENERATOR_X: Secp256K1Base = + Secp256K1Base([0x59F2815B16F81798, 0x029BFCDB2DCE28D9, 0x55A06295CE870B07, 0x79BE667EF9DCBBAC]); + +/// 32670510020758816978083085130507043184471273380659243275938904335757337482424 +const SECP256K1_GENERATOR_Y: Secp256K1Base = + Secp256K1Base([0x9C47D08FFB10D4B8, 0xFD17B448A6855419, 0x5DA4FBFC0E1108A8, 0x483ADA7726A3C465]); + +#[cfg(test)] +mod tests { + use num::BigUint; + use plonky2::field::{ + secp256k1_scalar::Secp256K1Scalar, + types::{Field, PrimeField}, + }; + + use crate::curve::{ + curve_types::{AffinePoint, Curve, ProjectivePoint}, + secp256k1::Secp256K1, + }; + + #[test] + fn test_generator() { + let g = Secp256K1::GENERATOR_AFFINE; + assert!(g.is_valid()); + + let neg_g = AffinePoint:: { x: g.x, y: -g.y, zero: g.zero }; + assert!(neg_g.is_valid()); + } + + #[test] + fn test_naive_multiplication() { + let g = Secp256K1::GENERATOR_PROJECTIVE; + let ten = 
Secp256K1Scalar::from_canonical_u64(10); + let product = mul_naive(ten, g); + let sum = g + g + g + g + g + g + g + g + g + g; + assert_eq!(product, sum); + } + + #[test] + fn test_g1_multiplication() { + let lhs = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[ + 1111, 2222, 3333, 4444, 5555, 6666, 7777, 8888, + ])); + assert_eq!( + Secp256K1::convert(lhs) * Secp256K1::GENERATOR_PROJECTIVE, + mul_naive(lhs, Secp256K1::GENERATOR_PROJECTIVE) + ); + } + + /// A simple, somewhat inefficient implementation of multiplication which is used as a reference + /// for correctness. + fn mul_naive( + lhs: Secp256K1Scalar, + rhs: ProjectivePoint, + ) -> ProjectivePoint { + let mut g = rhs; + let mut sum = ProjectivePoint::ZERO; + for limb in lhs.to_canonical_biguint().to_u64_digits().iter() { + for j in 0..64 { + if (limb >> j & 1u64) != 0u64 { + sum = sum + g; + } + g = g.double(); + } + } + sum + } +} diff --git a/ecdsa/src/gadgets/biguint.rs b/ecdsa/src/gadgets/biguint.rs new file mode 100644 index 0000000000..5e4bf1a654 --- /dev/null +++ b/ecdsa/src/gadgets/biguint.rs @@ -0,0 +1,557 @@ +use alloc::{ + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; + +use num::{BigUint, Integer, Zero}; +use plonky2::{ + field::{ + extension::Extendable, + types::{PrimeField, PrimeField64}, + }, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::{BoolTarget, Target}, + witness::{PartitionWitness, Witness}, + }, + plonk::circuit_builder::CircuitBuilder, +}; +use plonky2_u32::{ + gadgets::{ + arithmetic_u32::{CircuitBuilderU32, U32Target}, + multiple_comparison::list_le_u32_circuit, + }, + witness::{GeneratedValuesU32, WitnessU32}, +}; + +#[derive(Clone, Debug)] +pub struct BigUintTarget { + pub limbs: Vec, +} + +impl BigUintTarget { + pub fn num_limbs(&self) -> usize { + self.limbs.len() + } + + pub fn get_limb(&self, i: usize) -> U32Target { + self.limbs[i] + } +} + +pub trait CircuitBuilderBiguint, const D: usize> { + fn constant_biguint(&mut self, value: &BigUint) -> BigUintTarget; + + fn zero_biguint(&mut self) -> BigUintTarget; + fn is_zero_biguint(&mut self, value: &BigUintTarget) -> BoolTarget; + + fn connect_biguint(&mut self, lhs: &BigUintTarget, rhs: &BigUintTarget); + + fn pad_biguints( + &mut self, + a: &BigUintTarget, + b: &BigUintTarget, + ) -> (BigUintTarget, BigUintTarget); + + /// Return true if a <= b + fn cmp_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BoolTarget; + + fn add_virtual_biguint_target(&mut self, num_limbs: usize) -> BigUintTarget; + + /// Add two `BigUintTarget`s. + fn add_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget; + + /// Subtract two `BigUintTarget`s. We assume that the first is larger than the second. + fn sub_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget; + + fn mul_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget; + + fn mul_biguint_by_bool(&mut self, a: &BigUintTarget, b: BoolTarget) -> BigUintTarget; + + /// Returns x * y + z. This is no more efficient than mul-then-add; it's purely for convenience (only need to call one CircuitBuilder function). 
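// `BigUintTarget` stores an integer as little-endian 32-bit limbs (limb 0 is the least
// significant), matching `BigUint::to_u32_digits`. A standalone sketch of that encoding
// with a plain u128 in place of circuit targets; the recombination mirrors the
// `(acc << 32) + limb` fold used by the witness helpers further down.
fn main() {
    let value: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;

    // Split into little-endian 32-bit limbs.
    let mut limbs = Vec::new();
    let mut rest = value;
    while rest != 0 {
        limbs.push((rest & 0xffff_ffff) as u32);
        rest >>= 32;
    }

    // Recombine most-significant limb first, as `get_biguint_target` does.
    let recombined = limbs.iter().rev().fold(0u128, |acc, &limb| (acc << 32) + limb as u128);
    assert_eq!(recombined, value);
}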
+ fn mul_add_biguint( + &mut self, + x: &BigUintTarget, + y: &BigUintTarget, + z: &BigUintTarget, + ) -> BigUintTarget; + + fn div_rem_biguint( + &mut self, + a: &BigUintTarget, + b: &BigUintTarget, + ) -> (BigUintTarget, BigUintTarget); + + fn div_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget; + + fn rem_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget; +} + +impl, const D: usize> CircuitBuilderBiguint + for CircuitBuilder +{ + fn constant_biguint(&mut self, value: &BigUint) -> BigUintTarget { + let limb_values = value.to_u32_digits(); + let limbs = limb_values.iter().map(|&l| self.constant_u32(l)).collect(); + + BigUintTarget { limbs } + } + + fn zero_biguint(&mut self) -> BigUintTarget { + self.constant_biguint(&BigUint::zero()) + } + + fn is_zero_biguint(&mut self, value: &BigUintTarget) -> BoolTarget { + let targets: Vec = value.limbs.iter().map(|&l| l.0).collect(); + let zero = self.zero(); + // Check if all limbs are equal to zero + targets.iter().fold(self._true(), |is_all_eq: BoolTarget, x| { + let is_eq = self.is_equal(*x, zero); + self.and(is_all_eq, is_eq) + }) + } + + fn connect_biguint(&mut self, lhs: &BigUintTarget, rhs: &BigUintTarget) { + let min_limbs = lhs.num_limbs().min(rhs.num_limbs()); + for i in 0..min_limbs { + self.connect_u32(lhs.get_limb(i), rhs.get_limb(i)); + } + + for i in min_limbs..lhs.num_limbs() { + self.assert_zero_u32(lhs.get_limb(i)); + } + for i in min_limbs..rhs.num_limbs() { + self.assert_zero_u32(rhs.get_limb(i)); + } + } + + fn pad_biguints( + &mut self, + a: &BigUintTarget, + b: &BigUintTarget, + ) -> (BigUintTarget, BigUintTarget) { + if a.num_limbs() > b.num_limbs() { + let mut padded_b = b.clone(); + for _ in b.num_limbs()..a.num_limbs() { + padded_b.limbs.push(self.zero_u32()); + } + + (a.clone(), padded_b) + } else { + let mut padded_a = a.clone(); + for _ in a.num_limbs()..b.num_limbs() { + padded_a.limbs.push(self.zero_u32()); + } + + (padded_a, b.clone()) + } + } + + fn cmp_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BoolTarget { + let (a, b) = self.pad_biguints(a, b); + + list_le_u32_circuit(self, a.limbs, b.limbs) + } + + fn add_virtual_biguint_target(&mut self, num_limbs: usize) -> BigUintTarget { + let limbs = self.add_virtual_u32_targets(num_limbs); + + BigUintTarget { limbs } + } + + fn add_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { + let num_limbs = a.num_limbs().max(b.num_limbs()); + + let mut combined_limbs = vec![]; + let mut carry = self.zero_u32(); + for i in 0..num_limbs { + let a_limb = (i < a.num_limbs()).then(|| a.limbs[i]).unwrap_or_else(|| self.zero_u32()); + let b_limb = (i < b.num_limbs()).then(|| b.limbs[i]).unwrap_or_else(|| self.zero_u32()); + + let (new_limb, new_carry) = self.add_many_u32(&[carry, a_limb, b_limb]); + carry = new_carry; + combined_limbs.push(new_limb); + } + combined_limbs.push(carry); + + BigUintTarget { limbs: combined_limbs } + } + + fn sub_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { + let (a, b) = self.pad_biguints(a, b); + let num_limbs = a.limbs.len(); + + let mut result_limbs = vec![]; + + let mut borrow = self.zero_u32(); + for i in 0..num_limbs { + let (result, new_borrow) = self.sub_u32(a.limbs[i], b.limbs[i], borrow); + result_limbs.push(result); + borrow = new_borrow; + } + // Borrow should be zero here. 
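// `add_biguint` is schoolbook limb addition: add limb pairs with a running carry and
// keep the final carry as one extra limb. A standalone sketch of the same loop on plain
// u32 limbs, with u64 used only to hold the intermediate sum and carry.
fn add_limbs(a: &[u32], b: &[u32]) -> Vec<u32> {
    let n = a.len().max(b.len());
    let mut out = Vec::with_capacity(n + 1);
    let mut carry = 0u64;
    for i in 0..n {
        let ai = a.get(i).copied().unwrap_or(0) as u64;
        let bi = b.get(i).copied().unwrap_or(0) as u64;
        let sum = ai + bi + carry; // at most 33 bits, so u64 is plenty
        out.push(sum as u32); // low 32 bits become the result limb
        carry = sum >> 32; // the high part feeds the next position
    }
    out.push(carry as u32); // like the circuit, the final carry becomes an extra limb
    out
}

fn main() {
    assert_eq!(add_limbs(&[u32::MAX], &[1]), vec![0, 1]); // carry into a new limb
    assert_eq!(add_limbs(&[7, 1], &[3]), vec![10, 1, 0]); // shorter input padded with zeros
}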
+ + BigUintTarget { limbs: result_limbs } + } + + fn mul_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { + let total_limbs = a.limbs.len() + b.limbs.len(); + + let mut to_add = vec![vec![]; total_limbs]; + for i in 0..a.limbs.len() { + for j in 0..b.limbs.len() { + let (product, carry) = self.mul_u32(a.limbs[i], b.limbs[j]); + to_add[i + j].push(product); + to_add[i + j + 1].push(carry); + } + } + + let mut combined_limbs = vec![]; + let mut carry = self.zero_u32(); + for summands in &mut to_add { + let (new_result, new_carry) = self.add_u32s_with_carry(summands, carry); + combined_limbs.push(new_result); + carry = new_carry; + } + combined_limbs.push(carry); + + BigUintTarget { limbs: combined_limbs } + } + + fn mul_biguint_by_bool(&mut self, a: &BigUintTarget, b: BoolTarget) -> BigUintTarget { + let t = b.target; + + BigUintTarget { limbs: a.limbs.iter().map(|&l| U32Target(self.mul(l.0, t))).collect() } + } + + fn mul_add_biguint( + &mut self, + x: &BigUintTarget, + y: &BigUintTarget, + z: &BigUintTarget, + ) -> BigUintTarget { + let prod = self.mul_biguint(x, y); + self.add_biguint(&prod, z) + } + + fn div_rem_biguint( + &mut self, + a: &BigUintTarget, + b: &BigUintTarget, + ) -> (BigUintTarget, BigUintTarget) { + let a_len = a.limbs.len(); + let b_len = b.limbs.len(); + let div_num_limbs = if b_len > a_len + 1 { 0 } else { a_len - b_len + 1 }; + let div = self.add_virtual_biguint_target(div_num_limbs); + let rem = self.add_virtual_biguint_target(b_len); + + self.add_simple_generator(BigUintDivRemGenerator:: { + a: a.clone(), + b: b.clone(), + div: div.clone(), + rem: rem.clone(), + _phantom: PhantomData, + }); + + let div_b = self.mul_biguint(&div, b); + let div_b_plus_rem = self.add_biguint(&div_b, &rem); + self.connect_biguint(a, &div_b_plus_rem); + + let cmp_rem_b = self.cmp_biguint(&rem, b); + self.assert_one(cmp_rem_b.target); + + (div, rem) + } + + fn div_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { + let (div, _rem) = self.div_rem_biguint(a, b); + div + } + + fn rem_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { + let (_div, rem) = self.div_rem_biguint(a, b); + rem + } +} + +pub trait WitnessBigUint: Witness { + fn get_biguint_target(&self, target: BigUintTarget) -> BigUint; + fn set_biguint_target(&mut self, target: &BigUintTarget, value: &BigUint); +} + +impl, F: PrimeField64> WitnessBigUint for T { + fn get_biguint_target(&self, target: BigUintTarget) -> BigUint { + target.limbs.into_iter().rev().fold(BigUint::zero(), |acc, limb| { + (acc << 32) + self.get_target(limb.0).to_canonical_biguint() + }) + } + + fn set_biguint_target(&mut self, target: &BigUintTarget, value: &BigUint) { + let mut limbs = value.to_u32_digits(); + assert!(target.num_limbs() >= limbs.len()); + limbs.resize(target.num_limbs(), 0); + for i in 0..target.num_limbs() { + self.set_u32_target(target.limbs[i], limbs[i]); + } + } +} + +pub trait GeneratedValuesBigUint { + fn set_biguint_target(&mut self, target: &BigUintTarget, value: &BigUint); +} + +impl GeneratedValuesBigUint for GeneratedValues { + fn set_biguint_target(&mut self, target: &BigUintTarget, value: &BigUint) { + let mut limbs = value.to_u32_digits(); + assert!(target.num_limbs() >= limbs.len()); + limbs.resize(target.num_limbs(), 0); + for i in 0..target.num_limbs() { + self.set_u32_target(target.get_limb(i), limbs[i]); + } + } +} + +#[derive(Debug, Clone)] +struct BigUintDivRemGenerator, const D: usize> { + a: BigUintTarget, + b: BigUintTarget, + div: 
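// `div_rem_biguint` is a hint-and-check pattern: `BigUintDivRemGenerator` computes the
// quotient and remainder outside the circuit, and the circuit only enforces
// a == div * b + rem plus a comparison of rem against b. A standalone native sketch of
// that split with toy values, using the `num` crate already listed in the dependencies.
use num::{BigUint, Integer};

fn main() {
    let a = BigUint::from(1_000_000_007u64);
    let b = BigUint::from(97u64);

    // "Generator" step: compute the hinted witness values natively.
    let (div, rem) = a.div_rem(&b);

    // "Constraint" step: what the circuit then checks about the hinted values.
    assert_eq!(&div * &b + &rem, a);
    assert!(rem < b);
}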
BigUintTarget, + rem: BigUintTarget, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for BigUintDivRemGenerator +{ + fn id(&self) -> String { + "BigUintDivRemGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.a.limbs.iter().chain(&self.b.limbs).map(|&l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = witness.get_biguint_target(self.a.clone()); + let b = witness.get_biguint_target(self.b.clone()); + let (div, rem) = a.div_rem(&b); + + out_buffer.set_biguint_target(&self.div, &div); + out_buffer.set_biguint_target(&self.rem, &rem); + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use num::{BigUint, FromPrimitive, Integer}; + use plonky2::{ + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + use rand::{rngs::OsRng, Rng}; + + use crate::gadgets::biguint::{CircuitBuilderBiguint, WitnessBigUint}; + + #[test] + fn test_biguint_add() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let mut rng = OsRng; + + let x_value = BigUint::from_u128(rng.gen()).unwrap(); + let y_value = BigUint::from_u128(rng.gen()).unwrap(); + let expected_z_value = &x_value + &y_value; + + let config = CircuitConfig::standard_recursion_config(); + let mut pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.add_virtual_biguint_target(x_value.to_u32_digits().len()); + let y = builder.add_virtual_biguint_target(y_value.to_u32_digits().len()); + let z = builder.add_biguint(&x, &y); + let expected_z = builder.add_virtual_biguint_target(expected_z_value.to_u32_digits().len()); + builder.connect_biguint(&z, &expected_z); + + pw.set_biguint_target(&x, &x_value); + pw.set_biguint_target(&y, &y_value); + pw.set_biguint_target(&expected_z, &expected_z_value); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_biguint_sub() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let mut rng = OsRng; + + let mut x_value = BigUint::from_u128(rng.gen()).unwrap(); + let mut y_value = BigUint::from_u128(rng.gen()).unwrap(); + if y_value > x_value { + (x_value, y_value) = (y_value, x_value); + } + let expected_z_value = &x_value - &y_value; + + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_biguint(&x_value); + let y = builder.constant_biguint(&y_value); + let z = builder.sub_biguint(&x, &y); + let expected_z = builder.constant_biguint(&expected_z_value); + + builder.connect_biguint(&z, &expected_z); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_biguint_mul() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let mut rng = OsRng; + + let x_value = 
BigUint::from_u128(rng.gen()).unwrap(); + let y_value = BigUint::from_u128(rng.gen()).unwrap(); + let expected_z_value = &x_value * &y_value; + + let config = CircuitConfig::standard_recursion_config(); + let mut pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.add_virtual_biguint_target(x_value.to_u32_digits().len()); + let y = builder.add_virtual_biguint_target(y_value.to_u32_digits().len()); + let z = builder.mul_biguint(&x, &y); + let expected_z = builder.add_virtual_biguint_target(expected_z_value.to_u32_digits().len()); + builder.connect_biguint(&z, &expected_z); + + pw.set_biguint_target(&x, &x_value); + pw.set_biguint_target(&y, &y_value); + pw.set_biguint_target(&expected_z, &expected_z_value); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_biguint_cmp() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let mut rng = OsRng; + + let x_value = BigUint::from_u128(rng.gen()).unwrap(); + let y_value = BigUint::from_u128(rng.gen()).unwrap(); + + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_biguint(&x_value); + let y = builder.constant_biguint(&y_value); + let cmp = builder.cmp_biguint(&x, &y); + let expected_cmp = builder.constant_bool(x_value <= y_value); + + builder.connect(cmp.target, expected_cmp.target); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_biguint_div_rem() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let mut rng = OsRng; + + let mut x_value = BigUint::from_u128(rng.gen()).unwrap(); + let mut y_value = BigUint::from_u128(rng.gen()).unwrap(); + if y_value > x_value { + (x_value, y_value) = (y_value, x_value); + } + let (expected_div_value, expected_rem_value) = x_value.div_rem(&y_value); + + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_biguint(&x_value); + let y = builder.constant_biguint(&y_value); + let (div, rem) = builder.div_rem_biguint(&x, &y); + + let expected_div = builder.constant_biguint(&expected_div_value); + let expected_rem = builder.constant_biguint(&expected_rem_value); + + builder.connect_biguint(&div, &expected_div); + builder.connect_biguint(&rem, &expected_rem); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_is_zero_biguint() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let zero = builder.zero_biguint(); + let should_true = builder.is_zero_biguint(&zero); + builder.assert_one(should_true.target); + + let one = builder.constant_biguint(&BigUint::from_u32(1).unwrap()); + { + let should_false = builder.is_zero_biguint(&one); + builder.assert_zero(should_false.target); + } + + let big = builder.constant_biguint(&BigUint::from_u128(1 << 100).unwrap()); + { + let should_false = builder.is_zero_biguint(&big); + builder.assert_zero(should_false.target); + } + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } +} diff 
--git a/ecdsa/src/gadgets/curve.rs b/ecdsa/src/gadgets/curve.rs new file mode 100644 index 0000000000..6e2c107f35 --- /dev/null +++ b/ecdsa/src/gadgets/curve.rs @@ -0,0 +1,487 @@ +use alloc::{vec, vec::Vec}; + +use plonky2::{ + field::{extension::Extendable, types::Sample}, + hash::hash_types::RichField, + iop::target::BoolTarget, + plonk::circuit_builder::CircuitBuilder, +}; + +use crate::{ + curve::curve_types::{AffinePoint, Curve, CurveScalar}, + gadgets::nonnative::{CircuitBuilderNonNative, NonNativeTarget}, +}; + +/// A Target representing an affine point on the curve `C`. We use incomplete arithmetic for efficiency, +/// so we assume these points are not zero. +#[derive(Clone, Debug)] +pub struct AffinePointTarget { + pub x: NonNativeTarget, + pub y: NonNativeTarget, +} + +impl AffinePointTarget { + pub fn to_vec(&self) -> Vec> { + vec![self.x.clone(), self.y.clone()] + } +} + +pub trait CircuitBuilderCurve, const D: usize> { + fn constant_affine_point(&mut self, point: AffinePoint) -> AffinePointTarget; + + fn connect_affine_point( + &mut self, + lhs: &AffinePointTarget, + rhs: &AffinePointTarget, + ); + + fn add_virtual_affine_point_target(&mut self) -> AffinePointTarget; + + fn curve_assert_valid(&mut self, p: &AffinePointTarget); + + fn curve_neg(&mut self, p: &AffinePointTarget) -> AffinePointTarget; + + fn curve_conditional_neg( + &mut self, + p: &AffinePointTarget, + b: BoolTarget, + ) -> AffinePointTarget; + + fn curve_double(&mut self, p: &AffinePointTarget) -> AffinePointTarget; + + fn curve_repeated_double( + &mut self, + p: &AffinePointTarget, + n: usize, + ) -> AffinePointTarget; + + /// Add two points, which are assumed to be non-equal. + fn curve_add( + &mut self, + p1: &AffinePointTarget, + p2: &AffinePointTarget, + ) -> AffinePointTarget; + + fn curve_conditional_add( + &mut self, + p1: &AffinePointTarget, + p2: &AffinePointTarget, + b: BoolTarget, + ) -> AffinePointTarget; + + fn curve_scalar_mul( + &mut self, + p: &AffinePointTarget, + n: &NonNativeTarget, + ) -> AffinePointTarget; +} + +impl, const D: usize> CircuitBuilderCurve + for CircuitBuilder +{ + fn constant_affine_point(&mut self, point: AffinePoint) -> AffinePointTarget { + debug_assert!(!point.zero); + AffinePointTarget { + x: self.constant_nonnative(point.x), + y: self.constant_nonnative(point.y), + } + } + + fn connect_affine_point( + &mut self, + lhs: &AffinePointTarget, + rhs: &AffinePointTarget, + ) { + self.connect_nonnative(&lhs.x, &rhs.x); + self.connect_nonnative(&lhs.y, &rhs.y); + } + + fn add_virtual_affine_point_target(&mut self) -> AffinePointTarget { + let x = self.add_virtual_nonnative_target(); + let y = self.add_virtual_nonnative_target(); + + AffinePointTarget { x, y } + } + + fn curve_assert_valid(&mut self, p: &AffinePointTarget) { + let a = self.constant_nonnative(C::A); + let b = self.constant_nonnative(C::B); + + let y_squared = self.mul_nonnative(&p.y, &p.y); + let x_squared = self.mul_nonnative(&p.x, &p.x); + let x_cubed = self.mul_nonnative(&x_squared, &p.x); + let a_x = self.mul_nonnative(&a, &p.x); + let a_x_plus_b = self.add_nonnative(&a_x, &b); + let rhs = self.add_nonnative(&x_cubed, &a_x_plus_b); + + self.connect_nonnative(&y_squared, &rhs); + } + + fn curve_neg(&mut self, p: &AffinePointTarget) -> AffinePointTarget { + let neg_y = self.neg_nonnative(&p.y); + AffinePointTarget { x: p.x.clone(), y: neg_y } + } + + fn curve_conditional_neg( + &mut self, + p: &AffinePointTarget, + b: BoolTarget, + ) -> AffinePointTarget { + AffinePointTarget { x: p.x.clone(), y: 
self.nonnative_conditional_neg(&p.y, b) } + } + + fn curve_double(&mut self, p: &AffinePointTarget) -> AffinePointTarget { + let AffinePointTarget { x, y } = p; + let double_y = self.add_nonnative(y, y); + let inv_double_y = self.inv_nonnative(&double_y); + let x_squared = self.mul_nonnative(x, x); + let double_x_squared = self.add_nonnative(&x_squared, &x_squared); + let triple_x_squared = self.add_nonnative(&double_x_squared, &x_squared); + + let a = self.constant_nonnative(C::A); + let triple_xx_a = self.add_nonnative(&triple_x_squared, &a); + let lambda = self.mul_nonnative(&triple_xx_a, &inv_double_y); + let lambda_squared = self.mul_nonnative(&lambda, &lambda); + let x_double = self.add_nonnative(x, x); + + let x3 = self.sub_nonnative(&lambda_squared, &x_double); + + let x_diff = self.sub_nonnative(x, &x3); + let lambda_x_diff = self.mul_nonnative(&lambda, &x_diff); + + let y3 = self.sub_nonnative(&lambda_x_diff, y); + + AffinePointTarget { x: x3, y: y3 } + } + + fn curve_repeated_double( + &mut self, + p: &AffinePointTarget, + n: usize, + ) -> AffinePointTarget { + let mut result = p.clone(); + + for _ in 0..n { + result = self.curve_double(&result); + } + + result + } + + fn curve_add( + &mut self, + p1: &AffinePointTarget, + p2: &AffinePointTarget, + ) -> AffinePointTarget { + let AffinePointTarget { x: x1, y: y1 } = p1; + let AffinePointTarget { x: x2, y: y2 } = p2; + + let u = self.sub_nonnative(y2, y1); + let v = self.sub_nonnative(x2, x1); + let v_inv = self.inv_nonnative(&v); + let s = self.mul_nonnative(&u, &v_inv); + let s_squared = self.mul_nonnative(&s, &s); + let x_sum = self.add_nonnative(x2, x1); + let x3 = self.sub_nonnative(&s_squared, &x_sum); + let x_diff = self.sub_nonnative(x1, &x3); + let prod = self.mul_nonnative(&s, &x_diff); + let y3 = self.sub_nonnative(&prod, y1); + + AffinePointTarget { x: x3, y: y3 } + } + + fn curve_conditional_add( + &mut self, + p1: &AffinePointTarget, + p2: &AffinePointTarget, + b: BoolTarget, + ) -> AffinePointTarget { + let not_b = self.not(b); + let sum = self.curve_add(p1, p2); + let x_if_true = self.mul_nonnative_by_bool(&sum.x, b); + let y_if_true = self.mul_nonnative_by_bool(&sum.y, b); + let x_if_false = self.mul_nonnative_by_bool(&p1.x, not_b); + let y_if_false = self.mul_nonnative_by_bool(&p1.y, not_b); + + let x = self.add_nonnative(&x_if_true, &x_if_false); + let y = self.add_nonnative(&y_if_true, &y_if_false); + + AffinePointTarget { x, y } + } + + fn curve_scalar_mul( + &mut self, + p: &AffinePointTarget, + n: &NonNativeTarget, + ) -> AffinePointTarget { + let bits = self.split_nonnative_to_bits(n); + + let rando = (CurveScalar(C::ScalarField::rand()) * C::GENERATOR_PROJECTIVE).to_affine(); + let randot = self.constant_affine_point(rando); + // Result starts at `rando`, which is later subtracted, because we don't support arithmetic with the zero point. 
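// `curve_double` and `curve_add` are the usual incomplete affine formulas: doubling uses
// lambda = (3x^2 + a) / (2y), addition uses lambda = (y2 - y1) / (x2 - x1), then
// x3 = lambda^2 - x1 - x2 and y3 = lambda * (x1 - x3) - y1. A standalone toy check of
// those formulas over F_17 with the curve y^2 = x^3 + 7 (same a = 0, b = 7 shape as
// secp256k1, but a tiny made-up field).
const P: i64 = 17;

fn inv(x: i64) -> i64 {
    // Fermat inverse; fine for a tiny prime modulus.
    let mut acc = 1i64;
    let mut b = x.rem_euclid(P);
    let mut e = P - 2;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % P;
        }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

fn on_curve((x, y): (i64, i64)) -> bool {
    (y * y - (x * x * x + 7)).rem_euclid(P) == 0
}

// Incomplete doubling: lambda = 3x^2 / (2y), since a = 0 here.
fn double((x, y): (i64, i64)) -> (i64, i64) {
    let lambda = 3 * x * x % P * inv(2 * y) % P;
    let x3 = (lambda * lambda - 2 * x).rem_euclid(P);
    let y3 = (lambda * (x - x3) - y).rem_euclid(P);
    (x3, y3)
}

// Incomplete addition: assumes the points are distinct and not negatives of each other.
fn add((x1, y1): (i64, i64), (x2, y2): (i64, i64)) -> (i64, i64) {
    let lambda = (y2 - y1).rem_euclid(P) * inv((x2 - x1).rem_euclid(P)) % P;
    let x3 = (lambda * lambda - x1 - x2).rem_euclid(P);
    let y3 = (lambda * (x1 - x3) - y1).rem_euclid(P);
    (x3, y3)
}

fn main() {
    let g = (1, 5); // 5^2 = 25 = 8 = 1 + 7 (mod 17), so g is on the curve
    assert!(on_curve(g));
    let g2 = double(g);
    let g3 = add(g2, g);
    assert!(on_curve(g2) && on_curve(g3));
}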
+ let mut result = self.add_virtual_affine_point_target(); + self.connect_affine_point(&randot, &result); + + let mut two_i_times_p = self.add_virtual_affine_point_target(); + self.connect_affine_point(p, &two_i_times_p); + + for &bit in bits.iter() { + let not_bit = self.not(bit); + + let result_plus_2_i_p = self.curve_add(&result, &two_i_times_p); + + let new_x_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.x, bit); + let new_x_if_not_bit = self.mul_nonnative_by_bool(&result.x, not_bit); + let new_y_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.y, bit); + let new_y_if_not_bit = self.mul_nonnative_by_bool(&result.y, not_bit); + + let new_x = self.add_nonnative(&new_x_if_bit, &new_x_if_not_bit); + let new_y = self.add_nonnative(&new_y_if_bit, &new_y_if_not_bit); + + result = AffinePointTarget { x: new_x, y: new_y }; + + two_i_times_p = self.curve_double(&two_i_times_p); + } + + // Subtract off result's intial value of `rando`. + let neg_r = self.curve_neg(&randot); + result = self.curve_add(&result, &neg_r); + + result + } +} + +#[cfg(test)] +mod tests { + use core::ops::Neg; + + use anyhow::Result; + use plonky2::{ + field::{ + secp256k1_base::Secp256K1Base, + secp256k1_scalar::Secp256K1Scalar, + types::{Field, Sample}, + }, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use crate::{ + curve::{ + curve_types::{AffinePoint, Curve, CurveScalar}, + secp256k1::Secp256K1, + }, + gadgets::{curve::CircuitBuilderCurve, nonnative::CircuitBuilderNonNative}, + }; + + #[test] + fn test_curve_point_is_valid() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let g_target = builder.constant_affine_point(g); + let neg_g_target = builder.curve_neg(&g_target); + + builder.curve_assert_valid(&g_target); + builder.curve_assert_valid(&neg_g_target); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } + + #[test] + #[should_panic] + fn test_curve_point_is_not_valid() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let not_g = AffinePoint:: { x: g.x, y: g.y + Secp256K1Base::ONE, zero: g.zero }; + let not_g_target = builder.constant_affine_point(not_g); + + builder.curve_assert_valid(¬_g_target); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof).unwrap() + } + + #[test] + fn test_curve_double() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let g_target = builder.constant_affine_point(g); + let neg_g_target = builder.curve_neg(&g_target); + + let double_g = g.double(); + let double_g_expected = builder.constant_affine_point(double_g); + builder.curve_assert_valid(&double_g_expected); + + let double_neg_g = (-g).double(); + let double_neg_g_expected = builder.constant_affine_point(double_neg_g); + 
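// `curve_scalar_mul` is least-significant-bit-first double-and-add, except that the
// accumulator starts at a random point `rando` (so the incomplete addition never sees
// the zero point) and `rando` is subtracted again at the end. A standalone sketch of the
// same loop on plain integers, where adding a point is just integer addition.
fn main() {
    let p: u64 = 9; // stand-in for the point P
    let n: u64 = 0b1011_0110; // 8-bit scalar, processed least-significant bit first
    let rando: u64 = 1_000; // stand-in for the random starting point

    let mut result = rando; // accumulator starts at `rando` instead of zero
    let mut two_i_times_p = p; // holds 2^i * P at step i
    for i in 0..8 {
        if (n >> i) & 1 == 1 {
            // the circuit does this with a limb-wise select; here it is a plain branch
            result += two_i_times_p;
        }
        two_i_times_p *= 2;
    }
    result -= rando; // subtract the initial offset again

    assert_eq!(result, n * p);
}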
builder.curve_assert_valid(&double_neg_g_expected); + + let double_g_actual = builder.curve_double(&g_target); + let double_neg_g_actual = builder.curve_double(&neg_g_target); + builder.curve_assert_valid(&double_g_actual); + builder.curve_assert_valid(&double_neg_g_actual); + + builder.connect_affine_point(&double_g_expected, &double_g_actual); + builder.connect_affine_point(&double_neg_g_expected, &double_neg_g_actual); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } + + #[test] + fn test_curve_add() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let double_g = g.double(); + let g_plus_2g = (g + double_g).to_affine(); + let g_plus_2g_expected = builder.constant_affine_point(g_plus_2g); + builder.curve_assert_valid(&g_plus_2g_expected); + + let g_target = builder.constant_affine_point(g); + let double_g_target = builder.curve_double(&g_target); + let g_plus_2g_actual = builder.curve_add(&g_target, &double_g_target); + builder.curve_assert_valid(&g_plus_2g_actual); + + builder.connect_affine_point(&g_plus_2g_expected, &g_plus_2g_actual); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } + + #[test] + fn test_curve_conditional_add() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let double_g = g.double(); + let g_plus_2g = (g + double_g).to_affine(); + let g_plus_2g_expected = builder.constant_affine_point(g_plus_2g); + + let g_expected = builder.constant_affine_point(g); + let double_g_target = builder.curve_double(&g_expected); + let t = builder._true(); + let f = builder._false(); + let g_plus_2g_actual = builder.curve_conditional_add(&g_expected, &double_g_target, t); + let g_actual = builder.curve_conditional_add(&g_expected, &double_g_target, f); + + builder.connect_affine_point(&g_plus_2g_expected, &g_plus_2g_actual); + builder.connect_affine_point(&g_expected, &g_actual); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } + + #[test] + #[ignore] + fn test_curve_mul() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_PROJECTIVE.to_affine(); + let five = Secp256K1Scalar::from_canonical_usize(5); + let neg_five = five.neg(); + let neg_five_scalar = CurveScalar::(neg_five); + let neg_five_g = (neg_five_scalar * g.to_projective()).to_affine(); + let neg_five_g_expected = builder.constant_affine_point(neg_five_g); + builder.curve_assert_valid(&neg_five_g_expected); + + let g_target = builder.constant_affine_point(g); + let neg_five_target = builder.constant_nonnative(neg_five); + let neg_five_g_actual = builder.curve_scalar_mul(&g_target, &neg_five_target); + builder.curve_assert_valid(&neg_five_g_actual); + + builder.connect_affine_point(&neg_five_g_expected, &neg_five_g_actual); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + 
} + + #[test] + #[ignore] + fn test_curve_random() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let rando = + (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE).to_affine(); + let randot = builder.constant_affine_point(rando); + + let two_target = builder.constant_nonnative(Secp256K1Scalar::TWO); + let randot_doubled = builder.curve_double(&randot); + let randot_times_two = builder.curve_scalar_mul(&randot, &two_target); + builder.connect_affine_point(&randot_doubled, &randot_times_two); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } +} diff --git a/ecdsa/src/gadgets/curve_fixed_base.rs b/ecdsa/src/gadgets/curve_fixed_base.rs new file mode 100644 index 0000000000..417033c851 --- /dev/null +++ b/ecdsa/src/gadgets/curve_fixed_base.rs @@ -0,0 +1,134 @@ +use alloc::vec::Vec; + +use num::BigUint; +use plonky2::{ + field::{extension::Extendable, types::Field}, + hash::{hash_types::RichField, keccak::KeccakHash}, + plonk::{ + circuit_builder::CircuitBuilder, + config::{GenericHashOut, Hasher}, + }, +}; + +use crate::{ + curve::curve_types::{AffinePoint, Curve, CurveScalar}, + gadgets::{ + curve::{AffinePointTarget, CircuitBuilderCurve}, + curve_windowed_mul::CircuitBuilderWindowedMul, + nonnative::NonNativeTarget, + split_nonnative::CircuitBuilderSplit, + }, +}; + +/// Compute windowed fixed-base scalar multiplication, using a 4-bit window. +pub fn fixed_base_curve_mul_circuit, const D: usize>( + builder: &mut CircuitBuilder, + base: AffinePoint, + scalar: &NonNativeTarget, +) -> AffinePointTarget { + // Holds `(16^i) * base` for `i=0..scalar.value.limbs.len() * 8`. + let scaled_base = (0..scalar.value.limbs.len() * 8).scan(base, |acc, _| { + let tmp = *acc; + for _ in 0..4 { + *acc = acc.double(); + } + Some(tmp) + }); + + let limbs = builder.split_nonnative_to_4_bit_limbs(scalar); + + let hash_0 = KeccakHash::<32>::hash_no_pad(&[F::ZERO]); + let hash_0_scalar = C::ScalarField::from_noncanonical_biguint(BigUint::from_bytes_le( + &GenericHashOut::::to_bytes(&hash_0), + )); + let rando = (CurveScalar(hash_0_scalar) * C::GENERATOR_PROJECTIVE).to_affine(); + + let zero = builder.zero(); + let mut result = builder.constant_affine_point(rando); + // `s * P = sum s_i * P_i` with `P_i = (16^i) * P` and `s = sum s_i * (16^i)`. + for (limb, point) in limbs.into_iter().zip(scaled_base) { + // `muls_point[t] = t * P_i` for `t=0..16`. + let mut muls_point = (0..16) + .scan(AffinePoint::ZERO, |acc, _| { + let tmp = *acc; + *acc = (point + *acc).to_affine(); + Some(tmp) + }) + // First element if zero, so we skip it since `constant_affine_point` takes non-zero input. + .skip(1) + .map(|p| builder.constant_affine_point(p)) + .collect::>(); + // We add back a point in position 0. `limb == zero` is checked below, so this point can be arbitrary. 
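// `fixed_base_curve_mul_circuit` splits the scalar into 4-bit windows s_i, so that
// s * P = sum_i s_i * (16^i * P), and looks each term up in a precomputed table.
// A standalone sketch of that decomposition on plain integers (the real circuit also
// offsets the accumulator by `rando` to stay away from the zero point).
fn main() {
    let p: u64 = 11; // stand-in for the base point
    let s: u64 = 0xBEEF; // scalar to decompose into 4-bit windows

    // scaled[i] = 16^i * P, the fixed-base table.
    let scaled: Vec<u64> = (0..16).map(|i| p << (4 * i)).collect();

    // s = sum_i s_i * 16^i with 0 <= s_i < 16.
    let windows: Vec<u64> = (0..16).map(|i| (s >> (4 * i)) & 0xf).collect();

    let result: u64 = windows.iter().zip(&scaled).map(|(w, b)| w * b).sum();
    assert_eq!(result, s * p);
}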
+ muls_point.insert(0, muls_point[0].clone()); + let is_zero = builder.is_equal(limb, zero); + let should_add = builder.not(is_zero); + // `r = s_i * P_i` + let r = builder.random_access_curve_points(limb, muls_point); + result = builder.curve_conditional_add(&result, &r, should_add); + } + + let to_add = builder.constant_affine_point(-rando); + builder.curve_add(&result, &to_add) +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{ + secp256k1_scalar::Secp256K1Scalar, + types::{PrimeField, Sample}, + }, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use crate::{ + curve::{ + curve_types::{Curve, CurveScalar}, + secp256k1::Secp256K1, + }, + gadgets::{ + biguint::WitnessBigUint, curve::CircuitBuilderCurve, + curve_fixed_base::fixed_base_curve_mul_circuit, nonnative::CircuitBuilderNonNative, + }, + }; + + #[test] + #[ignore] + fn test_fixed_base() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let mut pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let n = Secp256K1Scalar::rand(); + + let res = (CurveScalar(n) * g.to_projective()).to_affine(); + let res_expected = builder.constant_affine_point(res); + builder.curve_assert_valid(&res_expected); + + let n_target = builder.add_virtual_nonnative_target::(); + pw.set_biguint_target(&n_target.value, &n.to_canonical_biguint()); + + let res_target = fixed_base_curve_mul_circuit(&mut builder, g, &n_target); + builder.curve_assert_valid(&res_target); + + builder.connect_affine_point(&res_target, &res_expected); + + dbg!(builder.num_gates()); + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } +} diff --git a/ecdsa/src/gadgets/curve_msm.rs b/ecdsa/src/gadgets/curve_msm.rs new file mode 100644 index 0000000000..41187f9fcd --- /dev/null +++ b/ecdsa/src/gadgets/curve_msm.rs @@ -0,0 +1,152 @@ +use alloc::vec; + +use num::BigUint; +use plonky2::{ + field::{extension::Extendable, types::Field}, + hash::{hash_types::RichField, keccak::KeccakHash}, + plonk::{ + circuit_builder::CircuitBuilder, + config::{GenericHashOut, Hasher}, + }, +}; + +use crate::{ + curve::curve_types::{Curve, CurveScalar}, + gadgets::{ + curve::{AffinePointTarget, CircuitBuilderCurve}, + curve_windowed_mul::CircuitBuilderWindowedMul, + nonnative::NonNativeTarget, + split_nonnative::CircuitBuilderSplit, + }, +}; + +/// Computes `n*p + m*q` using windowed MSM, with a 2-bit window. +/// See Algorithm 9.23 in Handbook of Elliptic and Hyperelliptic Curve Cryptography for a +/// description. +/// Note: Doesn't work if `p == q`. 
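// `curve_msm_circuit` is Straus/Shamir double-scalar multiplication with a 2-bit window:
// precompute i*P + j*Q for i, j in 0..4, then walk both scalars two bits at a time,
// doubling the accumulator twice per step and adding the selected table entry. A
// standalone sketch on plain integers, where doubling is just multiplication by 2.
fn main() {
    let (p, q): (u64, u64) = (9, 14); // stand-ins for the points P and Q
    let (n, m): (u64, u64) = (0b1101_1000, 0b0010_0111); // two 8-bit scalars

    // table[i + 4*j] = i*P + j*Q for i, j in 0..4.
    let mut table = [0u64; 16];
    for i in 0..4u64 {
        for j in 0..4u64 {
            table[(i + 4 * j) as usize] = i * p + j * q;
        }
    }

    // Process both scalars most-significant pair first, two bits per iteration.
    let mut result = 0u64;
    for step in (0..4).rev() {
        result *= 4; // two doublings
        let limb_n = (n >> (2 * step)) & 0b11;
        let limb_m = (m >> (2 * step)) & 0b11;
        result += table[(limb_n + 4 * limb_m) as usize];
    }

    assert_eq!(result, n * p + m * q);
}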
+pub fn curve_msm_circuit, const D: usize>( + builder: &mut CircuitBuilder, + p: &AffinePointTarget, + q: &AffinePointTarget, + n: &NonNativeTarget, + m: &NonNativeTarget, +) -> AffinePointTarget { + let limbs_n = builder.split_nonnative_to_2_bit_limbs(n); + let limbs_m = builder.split_nonnative_to_2_bit_limbs(m); + assert_eq!(limbs_n.len(), limbs_m.len()); + let num_limbs = limbs_n.len(); + + let hash_0 = KeccakHash::<32>::hash_no_pad(&[F::ZERO]); + let hash_0_scalar = C::ScalarField::from_noncanonical_biguint(BigUint::from_bytes_le( + &GenericHashOut::::to_bytes(&hash_0), + )); + let rando = (CurveScalar(hash_0_scalar) * C::GENERATOR_PROJECTIVE).to_affine(); + let rando_t = builder.constant_affine_point(rando); + let neg_rando = builder.constant_affine_point(-rando); + + // Precomputes `precomputation[i + 4*j] = i*p + j*q` for `i,j=0..4`. + let mut precomputation = vec![p.clone(); 16]; + let mut cur_p = rando_t.clone(); + let mut cur_q = rando_t.clone(); + for i in 0..4 { + precomputation[i] = cur_p.clone(); + precomputation[4 * i] = cur_q.clone(); + cur_p = builder.curve_add(&cur_p, p); + cur_q = builder.curve_add(&cur_q, q); + } + for i in 1..4 { + precomputation[i] = builder.curve_add(&precomputation[i], &neg_rando); + precomputation[4 * i] = builder.curve_add(&precomputation[4 * i], &neg_rando); + } + for i in 1..4 { + for j in 1..4 { + precomputation[i + 4 * j] = + builder.curve_add(&precomputation[i], &precomputation[4 * j]); + } + } + + let four = builder.constant(F::from_canonical_usize(4)); + + let zero = builder.zero(); + let mut result = rando_t; + for (limb_n, limb_m) in limbs_n.into_iter().zip(limbs_m).rev() { + result = builder.curve_repeated_double(&result, 2); + let index = builder.mul_add(four, limb_m, limb_n); + let r = builder.random_access_curve_points(index, precomputation.clone()); + let is_zero = builder.is_equal(index, zero); + let should_add = builder.not(is_zero); + result = builder.curve_conditional_add(&result, &r, should_add); + } + let starting_point_multiplied = (0..2 * num_limbs).fold(rando, |acc, _| acc.double()); + let to_add = builder.constant_affine_point(-starting_point_multiplied); + result = builder.curve_add(&result, &to_add); + + result +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{secp256k1_scalar::Secp256K1Scalar, types::Sample}, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use crate::{ + curve::{ + curve_types::{Curve, CurveScalar}, + secp256k1::Secp256K1, + }, + gadgets::{ + curve::CircuitBuilderCurve, curve_msm::curve_msm_circuit, + nonnative::CircuitBuilderNonNative, + }, + }; + + #[test] + #[ignore] + fn test_curve_msm() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let p = + (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE).to_affine(); + let q = + (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE).to_affine(); + let n = Secp256K1Scalar::rand(); + let m = Secp256K1Scalar::rand(); + + let res = + (CurveScalar(n) * p.to_projective() + CurveScalar(m) * q.to_projective()).to_affine(); + let res_expected = builder.constant_affine_point(res); + builder.curve_assert_valid(&res_expected); + + let p_target = builder.constant_affine_point(p); + let 
q_target = builder.constant_affine_point(q); + let n_target = builder.constant_nonnative(n); + let m_target = builder.constant_nonnative(m); + + let res_target = + curve_msm_circuit(&mut builder, &p_target, &q_target, &n_target, &m_target); + builder.curve_assert_valid(&res_target); + + builder.connect_affine_point(&res_target, &res_expected); + + dbg!(builder.num_gates()); + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } +} diff --git a/ecdsa/src/gadgets/curve_windowed_mul.rs b/ecdsa/src/gadgets/curve_windowed_mul.rs new file mode 100644 index 0000000000..db9469ce3d --- /dev/null +++ b/ecdsa/src/gadgets/curve_windowed_mul.rs @@ -0,0 +1,253 @@ +use alloc::{vec, vec::Vec}; +use core::marker::PhantomData; + +use num::BigUint; +use plonky2::{ + field::{ + extension::Extendable, + types::{Field, Sample}, + }, + hash::{hash_types::RichField, keccak::KeccakHash}, + iop::target::{BoolTarget, Target}, + plonk::{ + circuit_builder::CircuitBuilder, + config::{GenericHashOut, Hasher}, + }, +}; +use plonky2_u32::gadgets::arithmetic_u32::{CircuitBuilderU32, U32Target}; + +use crate::{ + curve::curve_types::{Curve, CurveScalar}, + gadgets::{ + biguint::BigUintTarget, + curve::{AffinePointTarget, CircuitBuilderCurve}, + nonnative::{CircuitBuilderNonNative, NonNativeTarget}, + split_nonnative::CircuitBuilderSplit, + }, +}; + +const WINDOW_SIZE: usize = 4; + +pub trait CircuitBuilderWindowedMul, const D: usize> { + fn precompute_window( + &mut self, + p: &AffinePointTarget, + ) -> Vec>; + + fn random_access_curve_points( + &mut self, + access_index: Target, + v: Vec>, + ) -> AffinePointTarget; + + fn if_affine_point( + &mut self, + b: BoolTarget, + p1: &AffinePointTarget, + p2: &AffinePointTarget, + ) -> AffinePointTarget; + + fn curve_scalar_mul_windowed( + &mut self, + p: &AffinePointTarget, + n: &NonNativeTarget, + ) -> AffinePointTarget; +} + +impl, const D: usize> CircuitBuilderWindowedMul + for CircuitBuilder +{ + fn precompute_window( + &mut self, + p: &AffinePointTarget, + ) -> Vec> { + let g = (CurveScalar(C::ScalarField::rand()) * C::GENERATOR_PROJECTIVE).to_affine(); + let neg = { + let mut neg = g; + neg.y = -neg.y; + self.constant_affine_point(neg) + }; + + let mut multiples = vec![self.constant_affine_point(g)]; + for i in 1..1 << WINDOW_SIZE { + multiples.push(self.curve_add(p, &multiples[i - 1])); + } + for i in 1..1 << WINDOW_SIZE { + multiples[i] = self.curve_add(&neg, &multiples[i]); + } + multiples + } + + fn random_access_curve_points( + &mut self, + access_index: Target, + v: Vec>, + ) -> AffinePointTarget { + let num_limbs = C::BaseField::BITS / 32; + let zero = self.zero_u32(); + let x_limbs: Vec> = (0..num_limbs) + .map(|i| v.iter().map(|p| p.x.value.limbs.get(i).unwrap_or(&zero).0).collect()) + .collect(); + let y_limbs: Vec> = (0..num_limbs) + .map(|i| v.iter().map(|p| p.y.value.limbs.get(i).unwrap_or(&zero).0).collect()) + .collect(); + + let selected_x_limbs: Vec<_> = x_limbs + .iter() + .map(|limbs| U32Target(self.random_access(access_index, limbs.clone()))) + .collect(); + let selected_y_limbs: Vec<_> = y_limbs + .iter() + .map(|limbs| U32Target(self.random_access(access_index, limbs.clone()))) + .collect(); + + let x = NonNativeTarget { + value: BigUintTarget { limbs: selected_x_limbs }, + _phantom: PhantomData, + }; + let y = NonNativeTarget { + value: BigUintTarget { limbs: selected_y_limbs }, + _phantom: PhantomData, + }; + AffinePointTarget { x, y } + } + + fn if_affine_point( + &mut self, + b: BoolTarget, + p1: 
&AffinePointTarget, + p2: &AffinePointTarget, + ) -> AffinePointTarget { + let new_x = self.if_nonnative(b, &p1.x, &p2.x); + let new_y = self.if_nonnative(b, &p1.y, &p2.y); + AffinePointTarget { x: new_x, y: new_y } + } + + fn curve_scalar_mul_windowed( + &mut self, + p: &AffinePointTarget, + n: &NonNativeTarget, + ) -> AffinePointTarget { + let hash_0 = KeccakHash::<25>::hash_no_pad(&[F::ZERO]); + let hash_0_scalar = C::ScalarField::from_noncanonical_biguint(BigUint::from_bytes_le( + &GenericHashOut::::to_bytes(&hash_0), + )); + let starting_point = CurveScalar(hash_0_scalar) * C::GENERATOR_PROJECTIVE; + let starting_point_multiplied = { + let mut cur = starting_point; + for _ in 0..C::ScalarField::BITS { + cur = cur.double(); + } + cur + }; + + let mut result = self.constant_affine_point(starting_point.to_affine()); + + let precomputation = self.precompute_window(p); + let zero = self.zero(); + + let windows = self.split_nonnative_to_4_bit_limbs(n); + for i in (0..windows.len()).rev() { + result = self.curve_repeated_double(&result, WINDOW_SIZE); + let window = windows[i]; + + let to_add = self.random_access_curve_points(window, precomputation.clone()); + let is_zero = self.is_equal(window, zero); + let should_add = self.not(is_zero); + result = self.curve_conditional_add(&result, &to_add, should_add); + } + + let to_subtract = self.constant_affine_point(starting_point_multiplied.to_affine()); + let to_add = self.curve_neg(&to_subtract); + result = self.curve_add(&result, &to_add); + + result + } +} + +#[cfg(test)] +mod tests { + use core::ops::Neg; + + use anyhow::Result; + use plonky2::{ + field::secp256k1_scalar::Secp256K1Scalar, + iop::witness::PartialWitness, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + use crate::curve::secp256k1::Secp256K1; + + #[test] + fn test_random_access_curve_points() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let num_points = 16; + let points: Vec<_> = (0..num_points) + .map(|_| { + let g = (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE) + .to_affine(); + builder.constant_affine_point(g) + }) + .collect(); + + let mut rng = OsRng; + let access_index = rng.gen::() % num_points; + + let access_index_target = builder.constant(F::from_canonical_usize(access_index)); + let selected = builder.random_access_curve_points(access_index_target, points.clone()); + let expected = points[access_index].clone(); + builder.connect_affine_point(&selected, &expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } + + #[test] + #[ignore] + fn test_curve_windowed_mul() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = + (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE).to_affine(); + let five = Secp256K1Scalar::from_canonical_usize(5); + let neg_five = five.neg(); + let neg_five_scalar = CurveScalar::(neg_five); + let neg_five_g = (neg_five_scalar * g.to_projective()).to_affine(); + let neg_five_g_expected = builder.constant_affine_point(neg_five_g); + 
builder.curve_assert_valid(&neg_five_g_expected); + + let g_target = builder.constant_affine_point(g); + let neg_five_target = builder.constant_nonnative(neg_five); + let neg_five_g_actual = builder.curve_scalar_mul_windowed(&g_target, &neg_five_target); + builder.curve_assert_valid(&neg_five_g_actual); + + builder.connect_affine_point(&neg_five_g_expected, &neg_five_g_actual); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } +} diff --git a/ecdsa/src/gadgets/ecdsa.rs b/ecdsa/src/gadgets/ecdsa.rs new file mode 100644 index 0000000000..23ee772b2c --- /dev/null +++ b/ecdsa/src/gadgets/ecdsa.rs @@ -0,0 +1,115 @@ +use core::marker::PhantomData; + +use plonky2::{ + field::{extension::Extendable, secp256k1_scalar::Secp256K1Scalar}, + hash::hash_types::RichField, + plonk::circuit_builder::CircuitBuilder, +}; + +use crate::{ + curve::{curve_types::Curve, secp256k1::Secp256K1}, + gadgets::{ + curve::{AffinePointTarget, CircuitBuilderCurve}, + curve_fixed_base::fixed_base_curve_mul_circuit, + glv::CircuitBuilderGlv, + nonnative::{CircuitBuilderNonNative, NonNativeTarget}, + }, +}; + +#[derive(Clone, Debug)] +pub struct ECDSASecretKeyTarget(pub NonNativeTarget); + +#[derive(Clone, Debug)] +pub struct ECDSAPublicKeyTarget(pub AffinePointTarget); + +#[derive(Clone, Debug)] +pub struct ECDSASignatureTarget { + pub r: NonNativeTarget, + pub s: NonNativeTarget, +} + +pub fn verify_message_circuit, const D: usize>( + builder: &mut CircuitBuilder, + msg: NonNativeTarget, + sig: ECDSASignatureTarget, + pk: ECDSAPublicKeyTarget, +) { + let ECDSASignatureTarget { r, s } = sig; + + builder.curve_assert_valid(&pk.0); + + let c = builder.inv_nonnative(&s); + let u1 = builder.mul_nonnative(&msg, &c); + let u2 = builder.mul_nonnative(&r, &c); + + let point1 = fixed_base_curve_mul_circuit(builder, Secp256K1::GENERATOR_AFFINE, &u1); + let point2 = builder.glv_mul(&pk.0, &u2); + let point = builder.curve_add(&point1, &point2); + + let x = NonNativeTarget:: { value: point.x.value, _phantom: PhantomData }; + builder.connect_nonnative(&r, &x); +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::types::Sample, + iop::witness::PartialWitness, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use super::*; + use crate::curve::{ + curve_types::CurveScalar, + ecdsa::{sign_message, ECDSAPublicKey, ECDSASecretKey, ECDSASignature}, + }; + + fn test_ecdsa_circuit_with_config(config: CircuitConfig) -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + type Curve = Secp256K1; + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let msg = Secp256K1Scalar::rand(); + let msg_target = builder.constant_nonnative(msg); + + let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); + let pk = ECDSAPublicKey((CurveScalar(sk.0) * Curve::GENERATOR_PROJECTIVE).to_affine()); + + let pk_target = ECDSAPublicKeyTarget(builder.constant_affine_point(pk.0)); + + let sig = sign_message(msg, sk); + + let ECDSASignature { r, s } = sig; + let r_target = builder.constant_nonnative(r); + let s_target = builder.constant_nonnative(s); + let sig_target = ECDSASignatureTarget { r: r_target, s: s_target }; + + verify_message_circuit(&mut builder, msg_target, sig_target, pk_target); + + dbg!(builder.num_gates()); + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + #[ignore] + fn 
test_ecdsa_circuit_narrow() -> Result<()> { + test_ecdsa_circuit_with_config(CircuitConfig::standard_ecc_config()) + } + + #[test] + #[ignore] + fn test_ecdsa_circuit_wide() -> Result<()> { + test_ecdsa_circuit_with_config(CircuitConfig::wide_ecc_config()) + } +} diff --git a/ecdsa/src/gadgets/glv.rs b/ecdsa/src/gadgets/glv.rs new file mode 100644 index 0000000000..b80e05fa63 --- /dev/null +++ b/ecdsa/src/gadgets/glv.rs @@ -0,0 +1,209 @@ +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +use core::marker::PhantomData; + +use plonky2::{ + field::{ + extension::Extendable, + secp256k1_base::Secp256K1Base, + secp256k1_scalar::Secp256K1Scalar, + types::{Field, PrimeField}, + }, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::{BoolTarget, Target}, + witness::{PartitionWitness, WitnessWrite}, + }, + plonk::circuit_builder::CircuitBuilder, +}; + +use crate::{ + curve::{ + glv::{decompose_secp256k1_scalar, GLV_BETA, GLV_S}, + secp256k1::Secp256K1, + }, + gadgets::{ + biguint::{GeneratedValuesBigUint, WitnessBigUint}, + curve::{AffinePointTarget, CircuitBuilderCurve}, + curve_msm::curve_msm_circuit, + nonnative::{CircuitBuilderNonNative, NonNativeTarget}, + }, +}; + +pub trait CircuitBuilderGlv, const D: usize> { + fn secp256k1_glv_beta(&mut self) -> NonNativeTarget; + + fn decompose_secp256k1_scalar( + &mut self, + k: &NonNativeTarget, + ) -> (NonNativeTarget, NonNativeTarget, BoolTarget, BoolTarget); + + fn glv_mul( + &mut self, + p: &AffinePointTarget, + k: &NonNativeTarget, + ) -> AffinePointTarget; +} + +impl, const D: usize> CircuitBuilderGlv + for CircuitBuilder +{ + fn secp256k1_glv_beta(&mut self) -> NonNativeTarget { + self.constant_nonnative(GLV_BETA) + } + + fn decompose_secp256k1_scalar( + &mut self, + k: &NonNativeTarget, + ) -> (NonNativeTarget, NonNativeTarget, BoolTarget, BoolTarget) + { + let k1 = self.add_virtual_nonnative_target_sized::(4); + let k2 = self.add_virtual_nonnative_target_sized::(4); + let k1_neg = self.add_virtual_bool_target_unsafe(); + let k2_neg = self.add_virtual_bool_target_unsafe(); + + self.add_simple_generator(GLVDecompositionGenerator:: { + k: k.clone(), + k1: k1.clone(), + k2: k2.clone(), + k1_neg, + k2_neg, + _phantom: PhantomData, + }); + + // Check that `k1_raw + GLV_S * k2_raw == k`. 
+ let k1_raw = self.nonnative_conditional_neg(&k1, k1_neg); + let k2_raw = self.nonnative_conditional_neg(&k2, k2_neg); + let s = self.constant_nonnative(GLV_S); + let mut should_be_k = self.mul_nonnative(&s, &k2_raw); + should_be_k = self.add_nonnative(&should_be_k, &k1_raw); + self.connect_nonnative(&should_be_k, k); + + (k1, k2, k1_neg, k2_neg) + } + + fn glv_mul( + &mut self, + p: &AffinePointTarget, + k: &NonNativeTarget, + ) -> AffinePointTarget { + let (k1, k2, k1_neg, k2_neg) = self.decompose_secp256k1_scalar(k); + + let beta = self.secp256k1_glv_beta(); + let beta_px = self.mul_nonnative(&beta, &p.x); + let sp = AffinePointTarget:: { x: beta_px, y: p.y.clone() }; + + let p_neg = self.curve_conditional_neg(p, k1_neg); + let sp_neg = self.curve_conditional_neg(&sp, k2_neg); + curve_msm_circuit(self, &p_neg, &sp_neg, &k1, &k2) + } +} + +#[derive(Debug, Clone)] +struct GLVDecompositionGenerator, const D: usize> { + k: NonNativeTarget, + k1: NonNativeTarget, + k2: NonNativeTarget, + k1_neg: BoolTarget, + k2_neg: BoolTarget, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for GLVDecompositionGenerator +{ + fn id(&self) -> String { + "GLVDecompositionGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.k.value.limbs.iter().map(|l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let k = Secp256K1Scalar::from_noncanonical_biguint( + witness.get_biguint_target(self.k.value.clone()), + ); + + let (k1, k2, k1_neg, k2_neg) = decompose_secp256k1_scalar(k); + + out_buffer.set_biguint_target(&self.k1.value, &k1.to_canonical_biguint()); + out_buffer.set_biguint_target(&self.k2.value, &k2.to_canonical_biguint()); + out_buffer.set_bool_target(self.k1_neg, k1_neg); + out_buffer.set_bool_target(self.k2_neg, k2_neg); + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{secp256k1_scalar::Secp256K1Scalar, types::Sample}, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use crate::{ + curve::{ + curve_types::{Curve, CurveScalar}, + glv::glv_mul, + secp256k1::Secp256K1, + }, + gadgets::{ + curve::CircuitBuilderCurve, glv::CircuitBuilderGlv, nonnative::CircuitBuilderNonNative, + }, + }; + + #[test] + #[ignore] + fn test_glv_gadget() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let rando = + (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE).to_affine(); + let randot = builder.constant_affine_point(rando); + + let scalar = Secp256K1Scalar::rand(); + let scalar_target = builder.constant_nonnative(scalar); + + let rando_glv_scalar = glv_mul(rando.to_projective(), scalar); + let expected = builder.constant_affine_point(rando_glv_scalar.to_affine()); + let actual = builder.glv_mul(&randot, &scalar_target); + builder.connect_affine_point(&expected, &actual); 
+ + dbg!(builder.num_gates()); + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + + data.verify(proof) + } +} diff --git a/ecdsa/src/gadgets/mod.rs b/ecdsa/src/gadgets/mod.rs new file mode 100644 index 0000000000..35b101008f --- /dev/null +++ b/ecdsa/src/gadgets/mod.rs @@ -0,0 +1,9 @@ +pub mod biguint; +pub mod curve; +pub mod curve_fixed_base; +pub mod curve_msm; +pub mod curve_windowed_mul; +pub mod ecdsa; +pub mod glv; +pub mod nonnative; +pub mod split_nonnative; diff --git a/ecdsa/src/gadgets/nonnative.rs b/ecdsa/src/gadgets/nonnative.rs new file mode 100644 index 0000000000..cffffe8521 --- /dev/null +++ b/ecdsa/src/gadgets/nonnative.rs @@ -0,0 +1,971 @@ +use alloc::{ + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; + +use num::{BigUint, Integer, One, Zero}; +use plonky2::{ + field::{ + extension::Extendable, + types::{Field, PrimeField}, + }, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::{BoolTarget, Target}, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::circuit_builder::CircuitBuilder, + util::ceil_div_usize, +}; +use plonky2_u32::{ + gadgets::{ + arithmetic_u32::{CircuitBuilderU32, U32Target}, + range_check::range_check_u32_circuit, + }, + witness::GeneratedValuesU32, +}; + +use crate::gadgets::biguint::{ + BigUintTarget, CircuitBuilderBiguint, GeneratedValuesBigUint, WitnessBigUint, +}; + +#[derive(Clone, Debug)] +pub struct NonNativeTarget { + pub(crate) value: BigUintTarget, + pub(crate) _phantom: PhantomData, +} + +pub trait CircuitBuilderNonNative, const D: usize> { + fn num_nonnative_limbs() -> usize { + ceil_div_usize(FF::BITS, 32) + } + + fn biguint_to_nonnative(&mut self, x: &BigUintTarget) -> NonNativeTarget; + + fn nonnative_to_canonical_biguint( + &mut self, + x: &NonNativeTarget, + ) -> BigUintTarget; + + fn constant_nonnative(&mut self, x: FF) -> NonNativeTarget; + + fn zero_nonnative(&mut self) -> NonNativeTarget; + + // Assert that two NonNativeTarget's, both assumed to be in reduced form, are equal. + fn connect_nonnative( + &mut self, + lhs: &NonNativeTarget, + rhs: &NonNativeTarget, + ); + + fn add_virtual_nonnative_target(&mut self) -> NonNativeTarget; + + fn add_virtual_nonnative_target_sized( + &mut self, + num_limbs: usize, + ) -> NonNativeTarget; + + fn add_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget; + + fn mul_nonnative_by_bool( + &mut self, + a: &NonNativeTarget, + b: BoolTarget, + ) -> NonNativeTarget; + + fn if_nonnative( + &mut self, + b: BoolTarget, + x: &NonNativeTarget, + y: &NonNativeTarget, + ) -> NonNativeTarget; + + fn add_many_nonnative( + &mut self, + to_add: &[NonNativeTarget], + ) -> NonNativeTarget; + + // Subtract two `NonNativeTarget`s. + fn sub_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget; + + fn mul_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget; + + fn mul_many_nonnative( + &mut self, + to_mul: &[NonNativeTarget], + ) -> NonNativeTarget; + + fn neg_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget; + + fn inv_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget; + + /// Returns `x % |FF|` as a `NonNativeTarget`. 
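+    /// (Clarifying note, added for exposition: the implementation embeds `FF::order()` as a
+    /// constant `BigUintTarget` and takes the remainder with `rem_biguint`; see `reduce` in the
+    /// impl below.)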
+ fn reduce(&mut self, x: &BigUintTarget) -> NonNativeTarget; + + fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget; + + fn bool_to_nonnative(&mut self, b: &BoolTarget) -> NonNativeTarget; + + // Split a nonnative field element to bits. + fn split_nonnative_to_bits(&mut self, x: &NonNativeTarget) -> Vec; + + fn nonnative_conditional_neg( + &mut self, + x: &NonNativeTarget, + b: BoolTarget, + ) -> NonNativeTarget; + + /// Assert a >= b + fn assert_greater_equal_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ); +} + +impl, const D: usize> CircuitBuilderNonNative + for CircuitBuilder +{ + fn num_nonnative_limbs() -> usize { + ceil_div_usize(FF::BITS, 32) + } + + fn biguint_to_nonnative(&mut self, x: &BigUintTarget) -> NonNativeTarget { + NonNativeTarget { value: x.clone(), _phantom: PhantomData } + } + + fn nonnative_to_canonical_biguint( + &mut self, + x: &NonNativeTarget, + ) -> BigUintTarget { + x.value.clone() + } + + fn constant_nonnative(&mut self, x: FF) -> NonNativeTarget { + let x_biguint = self.constant_biguint(&x.to_canonical_biguint()); + self.biguint_to_nonnative(&x_biguint) + } + + fn zero_nonnative(&mut self) -> NonNativeTarget { + self.constant_nonnative(FF::ZERO) + } + + // Assert that two NonNativeTarget's, both assumed to be in reduced form, are equal. + fn connect_nonnative( + &mut self, + lhs: &NonNativeTarget, + rhs: &NonNativeTarget, + ) { + self.connect_biguint(&lhs.value, &rhs.value); + } + + fn add_virtual_nonnative_target(&mut self) -> NonNativeTarget { + let num_limbs = Self::num_nonnative_limbs::(); + let value = self.add_virtual_biguint_target(num_limbs); + + NonNativeTarget { value, _phantom: PhantomData } + } + + fn add_virtual_nonnative_target_sized( + &mut self, + num_limbs: usize, + ) -> NonNativeTarget { + let value = self.add_virtual_biguint_target(num_limbs); + + NonNativeTarget { value, _phantom: PhantomData } + } + + fn add_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget { + let sum = self.add_virtual_nonnative_target::(); + let overflow = self.add_virtual_bool_target_unsafe(); + + self.add_simple_generator(NonNativeAdditionGenerator:: { + a: a.clone(), + b: b.clone(), + sum: sum.clone(), + overflow, + _phantom: PhantomData, + }); + + let sum_expected = self.add_biguint(&a.value, &b.value); + + let modulus = self.constant_biguint(&FF::order()); + let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow); + let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); + self.connect_biguint(&sum_expected, &sum_actual); + + // Range-check result. + // TODO: can potentially leave unreduced until necessary (e.g. when connecting values). 
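+        // (Clarifying note, added for exposition: the generator above supplies `sum` and the
+        // boolean `overflow`; connecting `a + b` to `sum + overflow * |FF|`, together with the
+        // `cmp_biguint` check below that constrains `sum.value` to be no larger than the modulus,
+        // forces `sum` to be the reduced value of a + b.)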
+ let cmp = self.cmp_biguint(&sum.value, &modulus); + let one = self.one(); + self.connect(cmp.target, one); + + sum + } + + fn mul_nonnative_by_bool( + &mut self, + a: &NonNativeTarget, + b: BoolTarget, + ) -> NonNativeTarget { + NonNativeTarget { value: self.mul_biguint_by_bool(&a.value, b), _phantom: PhantomData } + } + + fn if_nonnative( + &mut self, + b: BoolTarget, + x: &NonNativeTarget, + y: &NonNativeTarget, + ) -> NonNativeTarget { + let not_b = self.not(b); + let maybe_x = self.mul_nonnative_by_bool(x, b); + let maybe_y = self.mul_nonnative_by_bool(y, not_b); + self.add_nonnative(&maybe_x, &maybe_y) + } + + fn add_many_nonnative( + &mut self, + to_add: &[NonNativeTarget], + ) -> NonNativeTarget { + if to_add.len() == 1 { + return to_add[0].clone(); + } + + let sum = self.add_virtual_nonnative_target::(); + let overflow = self.add_virtual_u32_target(); + let summands = to_add.to_vec(); + + self.add_simple_generator(NonNativeMultipleAddsGenerator:: { + summands: summands.clone(), + sum: sum.clone(), + overflow, + _phantom: PhantomData, + }); + + range_check_u32_circuit(self, sum.value.limbs.clone()); + range_check_u32_circuit(self, vec![overflow]); + + let sum_expected = + summands.iter().fold(self.zero_biguint(), |a, b| self.add_biguint(&a, &b.value)); + + let modulus = self.constant_biguint(&FF::order()); + let overflow_biguint = BigUintTarget { limbs: vec![overflow] }; + let mod_times_overflow = self.mul_biguint(&modulus, &overflow_biguint); + let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); + self.connect_biguint(&sum_expected, &sum_actual); + + // Range-check result. + // TODO: can potentially leave unreduced until necessary (e.g. when connecting values). + let cmp = self.cmp_biguint(&sum.value, &modulus); + let one = self.one(); + self.connect(cmp.target, one); + + sum + } + + // Subtract two `NonNativeTarget`s. 
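+    // (Clarifying note, added for exposition: the generator witnesses `diff` and a borrow flag
+    // `overflow`, and the circuit enforces a = diff + b - overflow * |FF| over biguints, i.e.
+    // diff ≡ a - b (mod |FF|), with `diff` range-checked to u32 limbs.)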
+ fn sub_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget { + let diff = self.add_virtual_nonnative_target::(); + let overflow = self.add_virtual_bool_target_unsafe(); + + self.add_simple_generator(NonNativeSubtractionGenerator:: { + a: a.clone(), + b: b.clone(), + diff: diff.clone(), + overflow, + _phantom: PhantomData, + }); + + range_check_u32_circuit(self, diff.value.limbs.clone()); + self.assert_bool(overflow); + + let diff_plus_b = self.add_biguint(&diff.value, &b.value); + let modulus = self.constant_biguint(&FF::order()); + let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow); + let diff_plus_b_reduced = self.sub_biguint(&diff_plus_b, &mod_times_overflow); + self.connect_biguint(&a.value, &diff_plus_b_reduced); + + diff + } + + fn mul_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget { + let prod = self.add_virtual_nonnative_target::(); + let modulus = self.constant_biguint(&FF::order()); + let overflow = self.add_virtual_biguint_target( + a.value.num_limbs() + b.value.num_limbs() - modulus.num_limbs(), + ); + + self.add_simple_generator(NonNativeMultiplicationGenerator:: { + a: a.clone(), + b: b.clone(), + prod: prod.clone(), + overflow: overflow.clone(), + _phantom: PhantomData, + }); + + range_check_u32_circuit(self, prod.value.limbs.clone()); + range_check_u32_circuit(self, overflow.limbs.clone()); + + let prod_expected = self.mul_biguint(&a.value, &b.value); + + let mod_times_overflow = self.mul_biguint(&modulus, &overflow); + let prod_actual = self.add_biguint(&prod.value, &mod_times_overflow); + self.connect_biguint(&prod_expected, &prod_actual); + + prod + } + + fn mul_many_nonnative( + &mut self, + to_mul: &[NonNativeTarget], + ) -> NonNativeTarget { + if to_mul.len() == 1 { + return to_mul[0].clone(); + } + + let mut accumulator = self.mul_nonnative(&to_mul[0], &to_mul[1]); + for t in to_mul.iter().skip(2) { + accumulator = self.mul_nonnative(&accumulator, t); + } + accumulator + } + + fn neg_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + let zero_target = self.constant_biguint(&BigUint::zero()); + let zero_ff = self.biguint_to_nonnative(&zero_target); + + self.sub_nonnative(&zero_ff, x) + } + + fn inv_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + let num_limbs = x.value.num_limbs(); + let inv_biguint = self.add_virtual_biguint_target(num_limbs); + let div = self.add_virtual_biguint_target(num_limbs); + + self.add_simple_generator(NonNativeInverseGenerator:: { + x: x.clone(), + inv: inv_biguint.clone(), + div: div.clone(), + _phantom: PhantomData, + }); + + let product = self.mul_biguint(&x.value, &inv_biguint); + + let modulus = self.constant_biguint(&FF::order()); + let mod_times_div = self.mul_biguint(&modulus, &div); + let one = self.constant_biguint(&BigUint::one()); + let expected_product = self.add_biguint(&mod_times_div, &one); + self.connect_biguint(&product, &expected_product); + + NonNativeTarget:: { value: inv_biguint, _phantom: PhantomData } + } + + /// Returns `x % |FF|` as a `NonNativeTarget`. 
+ fn reduce(&mut self, x: &BigUintTarget) -> NonNativeTarget { + let modulus = FF::order(); + let order_target = self.constant_biguint(&modulus); + let value = self.rem_biguint(x, &order_target); + + NonNativeTarget { value, _phantom: PhantomData } + } + + fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + let x_biguint = self.nonnative_to_canonical_biguint(x); + self.reduce(&x_biguint) + } + + fn bool_to_nonnative(&mut self, b: &BoolTarget) -> NonNativeTarget { + let limbs = vec![U32Target(b.target)]; + let value = BigUintTarget { limbs }; + + NonNativeTarget { value, _phantom: PhantomData } + } + + // Split a nonnative field element to bits. + fn split_nonnative_to_bits(&mut self, x: &NonNativeTarget) -> Vec { + let num_limbs = x.value.num_limbs(); + let mut result = Vec::with_capacity(num_limbs * 32); + + for i in 0..num_limbs { + let limb = x.value.get_limb(i); + let bit_targets = self.split_le_base::<2>(limb.0, 32); + let mut bits: Vec<_> = bit_targets.iter().map(|&t| BoolTarget::new_unsafe(t)).collect(); + + result.append(&mut bits); + } + + result + } + + fn nonnative_conditional_neg( + &mut self, + x: &NonNativeTarget, + b: BoolTarget, + ) -> NonNativeTarget { + let not_b = self.not(b); + let neg = self.neg_nonnative(x); + let x_if_true = self.mul_nonnative_by_bool(&neg, b); + let x_if_false = self.mul_nonnative_by_bool(x, not_b); + + self.add_nonnative(&x_if_true, &x_if_false) + } + + /// Assert a >= b + fn assert_greater_equal_nonnative( + &mut self, + a: &NonNativeTarget, + b: &NonNativeTarget, + ) { + let a_int = self.nonnative_to_canonical_biguint(&a); + let b_int = self.nonnative_to_canonical_biguint(&b); + // b <= a + let is_a_bigger_equal = self.cmp_biguint(&b_int, &a_int); + self.assert_one(is_a_bigger_equal.target); + } +} + +#[derive(Debug, Clone)] +struct NonNativeAdditionGenerator, const D: usize, FF: PrimeField> { + a: NonNativeTarget, + b: NonNativeTarget, + sum: NonNativeTarget, + overflow: BoolTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: PrimeField> SimpleGenerator + for NonNativeAdditionGenerator +{ + fn id(&self) -> String { + "NonNativeAdditionGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.a.value.limbs.iter().cloned().chain(self.b.value.limbs.clone()).map(|l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = FF::from_noncanonical_biguint(witness.get_biguint_target(self.a.value.clone())); + let b = FF::from_noncanonical_biguint(witness.get_biguint_target(self.b.value.clone())); + let a_biguint = a.to_canonical_biguint(); + let b_biguint = b.to_canonical_biguint(); + let sum_biguint = a_biguint + b_biguint; + let modulus = FF::order(); + let (overflow, sum_reduced) = if sum_biguint > modulus { + (true, sum_biguint - modulus) + } else { + (false, sum_biguint) + }; + + out_buffer.set_biguint_target(&self.sum.value, &sum_reduced); + out_buffer.set_bool_target(self.overflow, overflow); + } +} + +#[derive(Debug, Clone)] +struct NonNativeMultipleAddsGenerator, const D: usize, FF: PrimeField> +{ + summands: Vec>, + sum: NonNativeTarget, + overflow: 
U32Target, + _phantom: PhantomData, +} + +impl, const D: usize, FF: PrimeField> SimpleGenerator + for NonNativeMultipleAddsGenerator +{ + fn id(&self) -> String { + "NonNativeMultipleAddsGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.summands + .iter() + .flat_map(|summand| summand.value.limbs.iter().map(|limb| limb.0)) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let summands: Vec<_> = self + .summands + .iter() + .map(|summand| { + FF::from_noncanonical_biguint(witness.get_biguint_target(summand.value.clone())) + }) + .collect(); + let summand_biguints: Vec<_> = + summands.iter().map(|summand| summand.to_canonical_biguint()).collect(); + + let sum_biguint = summand_biguints.iter().fold(BigUint::zero(), |a, b| a + b.clone()); + + let modulus = FF::order(); + let (overflow_biguint, sum_reduced) = sum_biguint.div_rem(&modulus); + let overflow = overflow_biguint.to_u64_digits()[0] as u32; + + out_buffer.set_biguint_target(&self.sum.value, &sum_reduced); + out_buffer.set_u32_target(self.overflow, overflow); + } +} + +#[derive(Debug, Clone)] +struct NonNativeSubtractionGenerator, const D: usize, FF: Field> { + a: NonNativeTarget, + b: NonNativeTarget, + diff: NonNativeTarget, + overflow: BoolTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: PrimeField> SimpleGenerator + for NonNativeSubtractionGenerator +{ + fn id(&self) -> String { + "NonNativeInverseGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.a.value.limbs.iter().cloned().chain(self.b.value.limbs.clone()).map(|l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = FF::from_noncanonical_biguint(witness.get_biguint_target(self.a.value.clone())); + let b = FF::from_noncanonical_biguint(witness.get_biguint_target(self.b.value.clone())); + let a_biguint = a.to_canonical_biguint(); + let b_biguint = b.to_canonical_biguint(); + + let modulus = FF::order(); + let (diff_biguint, overflow) = if a_biguint >= b_biguint { + (a_biguint - b_biguint, false) + } else { + (modulus + a_biguint - b_biguint, true) + }; + + out_buffer.set_biguint_target(&self.diff.value, &diff_biguint); + out_buffer.set_bool_target(self.overflow, overflow); + } +} + +#[derive(Debug, Clone)] +struct NonNativeMultiplicationGenerator, const D: usize, FF: Field> { + a: NonNativeTarget, + b: NonNativeTarget, + prod: NonNativeTarget, + overflow: BigUintTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: PrimeField> SimpleGenerator + for NonNativeMultiplicationGenerator +{ + fn id(&self) -> String { + "NonNativeMultiplicationGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: 
&plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.a.value.limbs.iter().cloned().chain(self.b.value.limbs.clone()).map(|l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = FF::from_noncanonical_biguint(witness.get_biguint_target(self.a.value.clone())); + let b = FF::from_noncanonical_biguint(witness.get_biguint_target(self.b.value.clone())); + let a_biguint = a.to_canonical_biguint(); + let b_biguint = b.to_canonical_biguint(); + + let prod_biguint = a_biguint * b_biguint; + + let modulus = FF::order(); + let (overflow_biguint, prod_reduced) = prod_biguint.div_rem(&modulus); + + out_buffer.set_biguint_target(&self.prod.value, &prod_reduced); + out_buffer.set_biguint_target(&self.overflow, &overflow_biguint); + } +} + +#[derive(Debug, Clone)] +struct NonNativeInverseGenerator, const D: usize, FF: PrimeField> { + x: NonNativeTarget, + inv: BigUintTarget, + div: BigUintTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: PrimeField> SimpleGenerator + for NonNativeInverseGenerator +{ + fn id(&self) -> String { + "NonNativeInverseGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.x.value.limbs.iter().map(|&l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let x = FF::from_noncanonical_biguint(witness.get_biguint_target(self.x.value.clone())); + let inv = x.inverse(); + + let x_biguint = x.to_canonical_biguint(); + let inv_biguint = inv.to_canonical_biguint(); + let prod = x_biguint * &inv_biguint; + let modulus = FF::order(); + let (div, _rem) = prod.div_rem(&modulus); + + out_buffer.set_biguint_target(&self.div, &div); + out_buffer.set_biguint_target(&self.inv, &inv_biguint); + } +} + +pub trait PartialWitnessNonNative>: Witness { + fn get_nonnative_target(&self, target: NonNativeTarget) -> FF; + fn set_nonnative_target(&mut self, target: NonNativeTarget, value: FF); +} +impl, W: Witness> PartialWitnessNonNative for W { + fn get_nonnative_target(&self, target: NonNativeTarget) -> FF { + FF::from_noncanonical_biguint(self.get_biguint_target(target.value)) + } + fn set_nonnative_target(&mut self, target: NonNativeTarget, value: FF) { + self.set_biguint_target(&target.value, &value.to_canonical_biguint()); + } +} +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{ + secp256k1_base::Secp256K1Base, + types::{Field, PrimeField, Sample}, + }, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use crate::gadgets::nonnative::{CircuitBuilderNonNative, PartialWitnessNonNative}; + + #[test] + fn test_nonnative_add() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C 
= PoseidonGoldilocksConfig; + type F = >::F; + + let x_ff = FF::rand(); + let y_ff = FF::rand(); + let sum_ff = x_ff + y_ff; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_nonnative(x_ff); + let y = builder.constant_nonnative(y_ff); + let sum = builder.add_nonnative(&x, &y); + + let sum_expected = builder.constant_nonnative(sum_ff); + builder.connect_nonnative(&sum, &sum_expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_nonnative_many_adds() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let a_ff = FF::rand(); + let b_ff = FF::rand(); + let c_ff = FF::rand(); + let d_ff = FF::rand(); + let e_ff = FF::rand(); + let f_ff = FF::rand(); + let g_ff = FF::rand(); + let h_ff = FF::rand(); + let sum_ff = a_ff + b_ff + c_ff + d_ff + e_ff + f_ff + g_ff + h_ff; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let a = builder.constant_nonnative(a_ff); + let b = builder.constant_nonnative(b_ff); + let c = builder.constant_nonnative(c_ff); + let d = builder.constant_nonnative(d_ff); + let e = builder.constant_nonnative(e_ff); + let f = builder.constant_nonnative(f_ff); + let g = builder.constant_nonnative(g_ff); + let h = builder.constant_nonnative(h_ff); + let all = [a, b, c, d, e, f, g, h]; + let sum = builder.add_many_nonnative(&all); + + let sum_expected = builder.constant_nonnative(sum_ff); + builder.connect_nonnative(&sum, &sum_expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_nonnative_sub() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let x_ff = FF::rand(); + let mut y_ff = FF::rand(); + while y_ff.to_canonical_biguint() > x_ff.to_canonical_biguint() { + y_ff = FF::rand(); + } + let diff_ff = x_ff - y_ff; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_nonnative(x_ff); + let y = builder.constant_nonnative(y_ff); + let diff = builder.sub_nonnative(&x, &y); + + let diff_expected = builder.constant_nonnative(diff_ff); + builder.connect_nonnative(&diff, &diff_expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_nonnative_mul() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let x_ff = FF::rand(); + let y_ff = FF::rand(); + let product_ff = x_ff * y_ff; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_nonnative(x_ff); + let y = builder.constant_nonnative(y_ff); + let product = builder.mul_nonnative(&x, &y); + + let product_expected = builder.constant_nonnative(product_ff); + builder.connect_nonnative(&product, &product_expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_nonnative_neg() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let x_ff = 
FF::rand(); + let neg_x_ff = -x_ff; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_nonnative(x_ff); + let neg_x = builder.neg_nonnative(&x); + + let neg_x_expected = builder.constant_nonnative(neg_x_ff); + builder.connect_nonnative(&neg_x, &neg_x_expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_nonnative_inv() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let x_ff = FF::rand(); + let inv_x_ff = x_ff.inverse(); + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_nonnative(x_ff); + let inv_x = builder.inv_nonnative(&x); + + let inv_x_expected = builder.constant_nonnative(inv_x_ff); + builder.connect_nonnative(&inv_x, &inv_x_expected); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_witness() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let x_ff = FF::rand(); + let y_ff = FF::rand(); + let sum_ff = x_ff + y_ff; + + let config = CircuitConfig::standard_ecc_config(); + let mut pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.add_virtual_nonnative_target(); + let y = builder.add_virtual_nonnative_target(); + let sum = builder.add_nonnative(&x, &y); + + let sum_expected = builder.constant_nonnative(sum_ff); + builder.connect_nonnative(&sum, &sum_expected); + + pw.set_nonnative_target(x, x_ff); + pw.set_nonnative_target(y, y_ff); + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_assert_greater_nonnative() -> Result<()> { + type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let a = builder.constant_nonnative(FF::from_canonical_u64(100)); + let b = builder.constant_nonnative(FF::from_canonical_u64(99)); + let c = builder.constant_nonnative(FF::from_canonical_u64(100)); + builder.assert_greater_equal_nonnative(&a, &b); + builder.assert_greater_equal_nonnative(&a, &c); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } +} diff --git a/ecdsa/src/gadgets/split_nonnative.rs b/ecdsa/src/gadgets/split_nonnative.rs new file mode 100644 index 0000000000..3c427a05c7 --- /dev/null +++ b/ecdsa/src/gadgets/split_nonnative.rs @@ -0,0 +1,120 @@ +use alloc::vec::Vec; +use core::marker::PhantomData; + +use itertools::Itertools; +use plonky2::{ + field::{extension::Extendable, types::Field}, + hash::hash_types::RichField, + iop::target::Target, + plonk::circuit_builder::CircuitBuilder, +}; +use plonky2_u32::gadgets::arithmetic_u32::{CircuitBuilderU32, U32Target}; + +use crate::gadgets::{biguint::BigUintTarget, nonnative::NonNativeTarget}; + +pub trait CircuitBuilderSplit, const D: usize> { + fn split_u32_to_4_bit_limbs(&mut self, val: U32Target) -> Vec; + + fn split_nonnative_to_4_bit_limbs( + &mut self, + val: &NonNativeTarget, + ) -> Vec; + + fn split_nonnative_to_2_bit_limbs( + &mut self, + val: &NonNativeTarget, + ) -> Vec; + 
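+    // (Clarifying note, added for exposition: the 4-bit split works per `U32Target` by taking
+    // sixteen base-4 digits with `split_le_base::<4>` and folding adjacent pairs as `low + 4 * high`,
+    // giving eight 4-bit limbs per 32-bit limb; `recombine_nonnative_4_bit_limbs` packs them back,
+    // eight at a time, into `U32Target`s without range checks.)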
+ // Note: assumes its inputs are 4-bit limbs, and does not range-check. + fn recombine_nonnative_4_bit_limbs( + &mut self, + limbs: Vec, + ) -> NonNativeTarget; +} + +impl, const D: usize> CircuitBuilderSplit + for CircuitBuilder +{ + fn split_u32_to_4_bit_limbs(&mut self, val: U32Target) -> Vec { + let two_bit_limbs = self.split_le_base::<4>(val.0, 16); + let four = self.constant(F::from_canonical_usize(4)); + let combined_limbs = + two_bit_limbs.iter().tuples().map(|(&a, &b)| self.mul_add(b, four, a)).collect(); + + combined_limbs + } + + fn split_nonnative_to_4_bit_limbs( + &mut self, + val: &NonNativeTarget, + ) -> Vec { + val.value.limbs.iter().flat_map(|&l| self.split_u32_to_4_bit_limbs(l)).collect() + } + + fn split_nonnative_to_2_bit_limbs( + &mut self, + val: &NonNativeTarget, + ) -> Vec { + val.value.limbs.iter().flat_map(|&l| self.split_le_base::<4>(l.0, 16)).collect() + } + + // Note: assumes its inputs are 4-bit limbs, and does not range-check. + fn recombine_nonnative_4_bit_limbs( + &mut self, + limbs: Vec, + ) -> NonNativeTarget { + let base = self.constant_u32(1 << 4); + let u32_limbs = limbs + .chunks(8) + .map(|chunk| { + let mut combined_chunk = self.zero_u32(); + for i in (0..8).rev() { + let (low, _high) = self.mul_add_u32(combined_chunk, base, U32Target(chunk[i])); + combined_chunk = low; + } + combined_chunk + }) + .collect(); + + NonNativeTarget { value: BigUintTarget { limbs: u32_limbs }, _phantom: PhantomData } + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{secp256k1_scalar::Secp256K1Scalar, types::Sample}, + iop::witness::PartialWitness, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + use super::*; + use crate::gadgets::nonnative::{CircuitBuilderNonNative, NonNativeTarget}; + + #[test] + fn test_split_nonnative() -> Result<()> { + type FF = Secp256K1Scalar; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_ecc_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = FF::rand(); + let x_target = builder.constant_nonnative(x); + let split = builder.split_nonnative_to_4_bit_limbs(&x_target); + let combined: NonNativeTarget = + builder.recombine_nonnative_4_bit_limbs(split); + builder.connect_nonnative(&x_target, &combined); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } +} diff --git a/ecdsa/src/lib.rs b/ecdsa/src/lib.rs new file mode 100644 index 0000000000..bf84913a3f --- /dev/null +++ b/ecdsa/src/lib.rs @@ -0,0 +1,7 @@ +#![allow(clippy::needless_range_loop)] +#![cfg_attr(not(test), no_std)] + +extern crate alloc; + +pub mod curve; +pub mod gadgets; diff --git a/ecgfp5/.gitignore b/ecgfp5/.gitignore new file mode 100644 index 0000000000..ea8c4bf7f3 --- /dev/null +++ b/ecgfp5/.gitignore @@ -0,0 +1 @@ +/target diff --git a/ecgfp5/Cargo.toml b/ecgfp5/Cargo.toml new file mode 100644 index 0000000000..5c1ea17249 --- /dev/null +++ b/ecgfp5/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "plonky2_ecgfp5" +version = "0.2.0" +authors = [ + "Sebastien La Duca ", + "Yongjin Huang ", +] +readme = "README.md" +description = "plonky2 gadgets for EcGFp5 curve and base field arithmetic" +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +categories.workspace = true + +# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+plonky2 = { path = "../plonky2" }
+plonky2_field = { path = "../field" }
+plonky2_ecdsa = { path = "../ecdsa" }
+plonky2_u32 = { path = "../u32" }
+num = "0.4"
+itertools = "0.10"
+serde = "1"
+rand = { version = "0.8.5", default-features = false, features = ["getrandom"] }
+hex = "0.4.3"
+
+[dev-dependencies]
+rand = { version = "0.8.5", features = ["min_const_gen"] }
+anyhow = "1.0.51"
+criterion = "0.4"
+log = "0.4"
+env_logger = "0.10"
+
+[[bench]]
+name = "scalar"
+harness = false
+
+[[bench]]
+name = "curve"
+harness = false
+
+[[bench]]
+name = "schnorr"
+harness = false
diff --git a/ecgfp5/LICENSE.md b/ecgfp5/LICENSE.md
new file mode 100644
index 0000000000..44e398ebb1
--- /dev/null
+++ b/ecgfp5/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Sebastien La Duca
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ecgfp5/README.md b/ecgfp5/README.md
new file mode 100644
index 0000000000..5eee06be65
--- /dev/null
+++ b/ecgfp5/README.md
@@ -0,0 +1,16 @@
+# plonky2-ecgfp5
+
+> DISCLAIMER: this is a prototype. It hasn't been audited. It probably has bugs. DO NOT USE THIS IN PRODUCTION.
+
+> DISCLAIMER: the curve is quite new and, being built over an extension field, may be vulnerable to more kinds of attacks than other curves. Read Thomas Pornin's paper on eprint for more information about the security of the curve itself.
+
+> NOTE: this crate is not (yet) constant time.
+
+This crate provides plonky2 SNARK gadgets and an out-of-circuit implementation of `EcGFp5`, an elliptic curve whose base field is a degree-5 extension field of Goldilocks, the field whose modulus is `2^64 - 2^32 + 1`.
+
+Most of the out-of-circuit implementation is built atop Thomas Pornin's implementation, which can be found [here](https://github.com/pornin/ecgfp5). All credit for designing the curve and providing its first implementation belongs to him.
+
+
+### Why does this exist?
+
+One of the most useful things to have in a proof system is a curve whose arithmetic can be carried out efficiently in the system's native field, as it allows one to verify public-key cryptography (signatures, asymmetric encryption, etc.) in-circuit at reasonable cost. For proof systems implemented atop BLS12-381 we have JubJub, and for proof systems implemented atop BN128 we have Baby JubJub. Plonky2 has something similar: EcGFp5. The hope is that this will allow people to do in plonky2 the things people typically do with Baby JubJub in Circom/Groth16.
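+
+### Quick sketch
+
+The following is a minimal, untested sketch of the out-of-circuit API, mirroring the calls used in the benches and examples in this repo; treat it as illustration rather than documentation.
+
+```rust
+use plonky2_ecgfp5::curve::{curve::Point, scalar_field::Scalar};
+use plonky2_field::types::Sample;
+use rand::thread_rng;
+
+fn main() {
+    let mut rng = thread_rng();
+
+    // A keypair: secret scalar and the corresponding curve point.
+    let sk = Scalar::sample(&mut rng);
+    let pk = Point::GENERATOR * sk;
+
+    // Fixed-base multiplication agrees with the generic scalar multiplication.
+    assert!(pk.equals(Point::mulgen(sk)));
+
+    // A point encodes to a single GF(p^5) element and decodes back.
+    let w = pk.encode();
+    let decoded = Point::decode(w).expect("a valid encoding decodes");
+    assert!(decoded.equals(pk));
+}
+```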
diff --git a/ecgfp5/benches/curve.rs b/ecgfp5/benches/curve.rs new file mode 100644 index 0000000000..10920e5407 --- /dev/null +++ b/ecgfp5/benches/curve.rs @@ -0,0 +1,48 @@ +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; +use plonky2_ecgfp5::curve::{curve::Point, scalar_field::Scalar}; +use plonky2_field::types::Sample; + +pub fn bench_curve(c: &mut Criterion) { + c.bench_function("add", |b| { + b.iter_batched( + || (Point::rand(), Point::rand()), + |(a, b)| { + black_box(a + b); + }, + BatchSize::SmallInput, + ) + }); + + c.bench_function("double", |b| { + b.iter_batched( + || Point::rand(), + |point| { + black_box(point.double()); + }, + BatchSize::SmallInput, + ) + }); + + c.bench_function("scalar_mul", |b| { + b.iter_batched( + || (Point::rand(), Scalar::rand()), + |(point, scalar)| { + black_box(point * scalar); + }, + BatchSize::SmallInput, + ) + }); + + c.bench_function("mulgen", |b| { + b.iter_batched( + || Scalar::rand(), + |scalar| { + black_box(Point::mulgen(scalar)); + }, + BatchSize::SmallInput, + ) + }); +} + +criterion_group!(benches, bench_curve); +criterion_main!(benches); diff --git a/ecgfp5/benches/scalar.rs b/ecgfp5/benches/scalar.rs new file mode 100644 index 0000000000..f697483007 --- /dev/null +++ b/ecgfp5/benches/scalar.rs @@ -0,0 +1,81 @@ +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; +use plonky2_ecgfp5::curve::scalar_field::Scalar; +use plonky2_field::{ + ops::Square, + types::{Field, Sample}, +}; + +pub fn bench_scalar(c: &mut Criterion) { + c.bench_function("add", |b| { + b.iter_batched( + || (Scalar::rand(), Scalar::rand()), + |(x, y)| black_box(x + y), + BatchSize::SmallInput, + ) + }); + + c.bench_function("mul", |b| { + b.iter_batched( + || (Scalar::rand(), Scalar::rand()), + |(x, y)| black_box(x * y), + BatchSize::SmallInput, + ) + }); + + c.bench_function("square", |b| { + b.iter_batched(|| Scalar::rand(), |x| black_box(x.square()), BatchSize::SmallInput) + }); + + c.bench_function("try_inverse", |b| { + b.iter_batched( + || Scalar::rand(), + |x| { + black_box(x.try_inverse()); + }, + BatchSize::SmallInput, + ) + }); + + c.bench_function("batch_multiplicative_inverse-tiny", |b| { + b.iter_batched( + || (0..2).into_iter().map(|_| Scalar::rand()).collect::>(), + |x| Scalar::batch_multiplicative_inverse(&x), + BatchSize::SmallInput, + ) + }); + + c.bench_function("batch_multiplicative_inverse-small", |b| { + b.iter_batched( + || (0..4).into_iter().map(|_| Scalar::rand()).collect::>(), + |x| Scalar::batch_multiplicative_inverse(&x), + BatchSize::SmallInput, + ) + }); + + c.bench_function("batch_multiplicative_inverse-medium", |b| { + b.iter_batched( + || (0..16).into_iter().map(|_| Scalar::rand()).collect::>(), + |x| Scalar::batch_multiplicative_inverse(&x), + BatchSize::SmallInput, + ) + }); + + c.bench_function("batch_multiplicative_inverse-large", |b| { + b.iter_batched( + || (0..256).into_iter().map(|_| Scalar::rand()).collect::>(), + |x| Scalar::batch_multiplicative_inverse(&x), + BatchSize::LargeInput, + ) + }); + + c.bench_function("batch_multiplicative_inverse-huge", |b| { + b.iter_batched( + || (0..65536).into_iter().map(|_| Scalar::rand()).collect::>(), + |x| Scalar::batch_multiplicative_inverse(&x), + BatchSize::LargeInput, + ) + }); +} + +criterion_group!(benches, bench_scalar); +criterion_main!(benches); diff --git a/ecgfp5/benches/schnorr.rs b/ecgfp5/benches/schnorr.rs new file mode 100644 index 0000000000..4b3f217cea --- /dev/null +++ b/ecgfp5/benches/schnorr.rs @@ -0,0 
+1,32 @@ +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use plonky2_ecgfp5::gadgets::schnorr::{schnorr_keygen, schnorr_sign, F}; +use plonky2_field::types::Field; +use rand::thread_rng; + +pub fn bench_schnorr(c: &mut Criterion) { + let mut group = c.benchmark_group("schnorr"); + group.bench_function("keygen", |b| { + b.iter(|| { + let mut rng = thread_rng(); + let _ = schnorr_keygen(&mut rng); + }) + }); + group.bench_function("sign", |b| { + b.iter_batched( + || { + let mut rng = thread_rng(); + let (_pk, sk) = schnorr_keygen(&mut rng); + let message = [F::TWO; 32]; + (sk, message) + }, + |(sk, message)| { + let mut rng = thread_rng(); + let _ = schnorr_sign(&message, &sk, &mut rng); + }, + BatchSize::SmallInput, + ) + }); +} + +criterion_group!(benches, bench_schnorr); +criterion_main!(benches); diff --git a/ecgfp5/examples/ecdsa_sig.rs b/ecgfp5/examples/ecdsa_sig.rs new file mode 100644 index 0000000000..4359cfae9b --- /dev/null +++ b/ecgfp5/examples/ecdsa_sig.rs @@ -0,0 +1,146 @@ +// toy example of a circuit that checks a ecdsa signatuse + +use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV}; +use log::Level; +use plonky2::{ + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CircuitData}, + config::{GenericConfig, PoseidonGoldilocksConfig}, + prover::prove, + }, + util::timing::TimingTree, +}; +use plonky2_ecdsa::gadgets::nonnative::CircuitBuilderNonNative; +use plonky2_ecgfp5::{ + curve::{curve::Point, scalar_field::Scalar}, + gadgets::curve::CircuitBuilderEcGFp5, +}; +use plonky2_field::types::{Field, Sample}; +use rand::thread_rng; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +pub const SPONGE_RATE: usize = 8; + +// we define a hash function whose digest is 5 GFp5 elems +// note: this doesn't apply any padding, so this is vulnerable to length extension attacks +// fn sig_hash(message: &[F]) -> [F; 5] { +// let mut res = [F::ZERO; 5]; +// let out = hash_n_to_m_no_pad::(message, 5); +// res.copy_from_slice(&out[..5]); + +// res +// } + +pub fn main() { + init_logger(); + let mut rng = thread_rng(); + + /* Generate Keypair */ + let sk = Scalar::sample(&mut rng); + let pk = Point::GENERATOR * sk; + + /* Signing */ + // 1. message digest & encoding + // let message_bytes = b"I'm going to be the king of pirates!"; + // let message_elems = message_bytes.map(|b| F::from_canonical_u8(b)); + // let e = sig_hash(&message_elems); + // 2. z = Ln lestmost bits of e = scalar of e + // let z = Scalar::from_gfp5(QuinticExtension(e)); + + // 1-2. Sample random z + let z = Scalar::sample(&mut rng); + // 3-5 + let (k, r) = { + // 3. sample random k + let mut k = Scalar::sample(&mut rng); + // 4. compute (x1, y1) = k*G + let mut rr = Point::GENERATOR * k; + // If x1 is zero, we need to sample a new k. + while rr.x.is_zero() { + k = Scalar::sample(&mut rng); + rr = Point::GENERATOR * k; + } + // 5. r = x1 mod n + let r = Scalar::from_gfp5(rr.x); + (k, r) + }; + // 6. 
s = k^{-1} * (z + r*sk) mod n
+    let s = k.inverse() * (z + r * sk);
+
+    /* Verify in Rust */
+    {
+        let u1 = z * s.inverse();
+        let u2 = r * s.inverse();
+        let point = u1 * Point::GENERATOR + u2 * pk;
+        assert!(point.equals(k * Point::GENERATOR));
+        assert!(!point.is_neutral());
+    }
+
+    /* Verify in circuit */
+    let config = CircuitConfig::wide_ecc_config();
+    let mut builder = CircuitBuilder::<F, D>::new(config);
+
+    // Verify(msg, (r,s), pk)
+    // Sig = (r, s)
+    let r_target = builder.constant_nonnative::<Scalar>(r);
+    let s_target = builder.constant_nonnative::<Scalar>(s);
+    let pk_target = builder.curve_constant(pk.to_weierstrass());
+
+    let g = builder.curve_generator();
+    // 1. Check pk is a valid curve point
+    {
+        // let a = builder.constant_nonnative(WeierstrassPoint::A);
+        // let b = builder.constant_nonnative(WeierstrassPoint::B);
+        // let px = &pk_target.0.0[0];
+        // let py = &pk_target.0.0[1];
+        // let y_squared = builder.mul_nonnative(py, py);
+        // let x_squared = builder.mul_nonnative(px, px);
+        // let x_cubed = builder.mul_nonnative(&x_squared, px);
+        // let a_x = builder.mul_nonnative(&a, px);
+        // let a_x_plus_b = builder.add_nonnative(&a_x, &b);
+        // let rhs = builder.add_nonnative(&x_cubed, &a_x_plus_b);
+        // builder.connect_nonnative(&y_squared, &rhs);
+    }
+
+    // 2. Check r and s are in [1, n-1] (omitted in this toy example)
+
+    // 3. e = HASH(msg)
+    // 4. z = L_n leftmost bits of e
+    // We take z directly from the witness here
+    let z_target = builder.constant_nonnative::<Scalar>(z);
+
+    // 5. u1 = z * s^{-1} mod n, u2 = r * s^{-1} mod n
+    let s_inv = builder.inv_nonnative(&s_target);
+    let u1 = builder.mul_nonnative(&z_target, &s_inv);
+    let u2 = builder.mul_nonnative(&r_target, &s_inv);
+
+    // 6. point = u1*G + u2*pk
+    let point = builder.curve_muladd_2(g, pk_target, &u1, &u2);
+
+    // 7.
Check point = r*G and point != O (identity element) + let r_g = builder.curve_scalar_mul(g, &r_target); + builder.curve_eq(point, r_g); + builder.curve_assert_not_zero(point); + + // build circuit + builder.print_gate_counts(0); + let circuit = builder.build::(); + let CircuitData { prover_only, common, verifier_only: _ } = &circuit; + + let pw = PartialWitness::new(); + let mut timing = TimingTree::new("prove", Level::Debug); + let proof = prove(prover_only, common, pw, &mut timing).expect("prover failed"); + timing.print(); + + circuit.verify(proof).expect("verifier failed"); +} + +fn init_logger() { + let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug")); +} diff --git a/ecgfp5/examples/point_add.rs b/ecgfp5/examples/point_add.rs new file mode 100644 index 0000000000..8660ac2378 --- /dev/null +++ b/ecgfp5/examples/point_add.rs @@ -0,0 +1,60 @@ +use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV}; +use log::Level; +use plonky2::{ + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CircuitData}, + config::{GenericConfig, PoseidonGoldilocksConfig}, + prover::prove, + }, + util::timing::TimingTree, +}; + +use plonky2_ecgfp5::{ + curve::curve::Point, + gadgets::curve::{CircuitBuilderEcGFp5, PartialWitnessCurve}, +}; +use plonky2_field::types::Sample; +use rand::thread_rng; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +pub fn main() { + init_logger(); + let mut rng = thread_rng(); + + // curve point add + println!("testing_curve_add..."); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p = Point::sample(&mut rng); + let p2 = Point::sample(&mut rng); + let expected = p + p2; + + let p = builder.curve_constant(p.to_weierstrass()); + let p2 = builder.curve_constant(p2.to_weierstrass()); + let sum = builder.curve_add(p, p2); + + builder.print_gate_counts(0); + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(sum, expected.to_weierstrass()); + + let CircuitData { prover_only, common, verifier_only: _ } = &circuit; + + let mut timing = TimingTree::new("prove", Level::Debug); + let proof = prove(prover_only, common, pw, &mut timing).expect("prover failed"); + timing.print(); + + circuit.verify(proof).expect("verifier failed"); +} + +fn init_logger() { + let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug")); +} diff --git a/ecgfp5/examples/scalar_mul.rs b/ecgfp5/examples/scalar_mul.rs new file mode 100644 index 0000000000..c3c4f4b96d --- /dev/null +++ b/ecgfp5/examples/scalar_mul.rs @@ -0,0 +1,91 @@ +use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV}; +use log::Level; +use plonky2::{ + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CircuitData}, + config::{GenericConfig, PoseidonGoldilocksConfig}, + prover::prove, + }, + util::timing::TimingTree, +}; +use plonky2_ecdsa::gadgets::nonnative::CircuitBuilderNonNative; +use plonky2_ecgfp5::{ + curve::{curve::Point, scalar_field::Scalar}, + gadgets::curve::{CircuitBuilderEcGFp5, PartialWitnessCurve}, +}; +use plonky2_field::types::Sample; +use rand::thread_rng; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +pub fn main() { + init_logger(); + let mut rng = thread_rng(); + + // scalar mul + println!("testing_scalar_mul..."); + + let config = CircuitConfig::standard_recursion_config(); + let 
mut builder = CircuitBuilder::::new(config); + + let p = Point::sample(&mut rng); + let s = Scalar::sample(&mut rng); + let prod_expected = p * s; + + let p = builder.curve_constant(p.to_weierstrass()); + let s = builder.constant_nonnative(s); + + let prod = builder.curve_scalar_mul(p, &s); + builder.register_curve_public_input(prod); + + builder.print_gate_counts(0); + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(prod, prod_expected.to_weierstrass()); + + let CircuitData { prover_only, common, verifier_only: _ } = &circuit; + + let mut timing = TimingTree::new("prove", Level::Debug); + let proof = prove(prover_only, common, pw, &mut timing).expect("prover failed"); + timing.print(); + + circuit.verify(proof).expect("verifier failed"); + + // scalar mul const + println!("testing scalar_mul_const..."); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p = Point::sample(&mut rng); + let s = Scalar::sample(&mut rng); + let prod_expected = p * s; + + let s = builder.constant_nonnative(s); + + let prod = builder.curve_scalar_mul_const(p, &s); + builder.register_curve_public_input(prod); + + builder.print_gate_counts(0); + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(prod, prod_expected.to_weierstrass()); + + let CircuitData { prover_only, common, verifier_only: _ } = &circuit; + + let mut timing = TimingTree::new("prove", Level::Debug); + let proof = prove(prover_only, common, pw, &mut timing).expect("prover failed"); + timing.print(); + + circuit.verify(proof).expect("verifier failed"); +} + +fn init_logger() { + let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug")); +} diff --git a/ecgfp5/examples/schnorr_sig.rs b/ecgfp5/examples/schnorr_sig.rs new file mode 100644 index 0000000000..5041fd621a --- /dev/null +++ b/ecgfp5/examples/schnorr_sig.rs @@ -0,0 +1,60 @@ +// toy example of a circuit that checks a schnorr signatuse + +use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV}; +use log::Level; +use plonky2::{ + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CircuitData}, + config::{GenericConfig, PoseidonGoldilocksConfig}, + prover::prove, + }, + util::timing::TimingTree, +}; + +use plonky2_ecgfp5::gadgets::schnorr::{ + schnorr_keygen, schnorr_sign, schnorr_verify_circuit, schnorr_verify_rust, +}; +use plonky2_field::types::Field; +use rand::thread_rng; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +pub const SPONGE_RATE: usize = 8; + +pub fn main() { + init_logger(); + + // Keygen + let mut rng = thread_rng(); + let (pk, sk) = schnorr_keygen(&mut rng); + // Sign + let message = b"Hello, world!"; + let message_f = message.map(|b| F::from_canonical_u8(b)); + let sig = schnorr_sign(&message_f, &sk, &mut rng); + // Verify in Rust + assert!(schnorr_verify_rust(&message_f, &pk, &sig)); + + // Verify in circuit + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + schnorr_verify_circuit(&mut builder, &message_f, &pk, &sig); + // build circuit + builder.print_gate_counts(0); + let circuit = builder.build::(); + let CircuitData { prover_only, common, verifier_only: _ } = &circuit; + + let pw = PartialWitness::new(); + let mut timing = TimingTree::new("prove", Level::Debug); + let proof = prove(prover_only, common, pw, &mut timing).expect("prover 
failed"); + timing.print(); + + circuit.verify(proof).expect("verifier failed"); +} + +fn init_logger() { + let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug")); +} diff --git a/ecgfp5/rustfmt.toml b/ecgfp5/rustfmt.toml new file mode 100644 index 0000000000..e651f760b1 --- /dev/null +++ b/ecgfp5/rustfmt.toml @@ -0,0 +1,7 @@ +# This project uses rustfmt to format source code. Run `cargo +nightly fmt [-- --check]. +# https://github.com/rust-lang/rustfmt/blob/master/Configurations.md + +# Break complex but short statements a bit less. +use_small_heuristics = "Max" + +imports_granularity = "Crate" \ No newline at end of file diff --git a/ecgfp5/src/curve/base_field.rs b/ecgfp5/src/curve/base_field.rs new file mode 100644 index 0000000000..62094cf891 --- /dev/null +++ b/ecgfp5/src/curve/base_field.rs @@ -0,0 +1,184 @@ +use plonky2::hash::hash_types::RichField; +use plonky2_field::{ + extension::{quintic::QuinticExtension, Extendable, FieldExtension, Frobenius}, + ops::Square, + types::{Field, PrimeField}, +}; + +use super::{GFp, GFp5}; + +pub trait Legendre { + fn legendre(&self) -> F; +} + +impl Legendre for QuinticExtension { + fn legendre(&self) -> GFp { + let frob1 = self.frobenius(); + let frob2 = frob1.frobenius(); + + let frob1_times_frob2 = frob1 * frob2; + let frob2_frob1_times_frob2 = frob1_times_frob2.repeated_frobenius(2); + + let xr_ext = *self * frob1_times_frob2 * frob2_frob1_times_frob2; + let xr: GFp = >::to_basefield_array(&xr_ext)[0]; + + let xr_31 = xr.exp_power_of_2(31); + let xr_63 = xr_31.exp_power_of_2(32); + + // only way `xr_31` can be zero is if `xr` is zero, in which case `self` is zero, in which case we want to return zero. + let xr_31_inv_or_zero = xr_31.inverse_or_zero(); + xr_63 * xr_31_inv_or_zero + } +} + +pub trait SquareRoot: Sized { + fn sqrt(&self) -> Option; + fn canonical_sqrt(&self) -> Option; +} + +impl SquareRoot for QuinticExtension { + fn sqrt(&self) -> Option { + sqrt_quintic_ext_goldilocks(*self) + } + + fn canonical_sqrt(&self) -> Option { + canonical_sqrt_quintic_ext_goldilocks(*self) + } +} + +pub trait InverseOrZero: Sized { + fn inverse_or_zero(&self) -> Self; +} + +impl InverseOrZero for GFp { + fn inverse_or_zero(&self) -> Self { + self.try_inverse().unwrap_or(GFp::ZERO) + } +} + +impl InverseOrZero for GFp5 { + fn inverse_or_zero(&self) -> Self { + self.try_inverse().unwrap_or(GFp5::ZERO) + } +} + +pub trait Sgn0 { + fn sgn0(&self) -> bool; +} + +impl Sgn0 for QuinticExtension { + fn sgn0(&self) -> bool { + quintic_ext_sgn0(*self) + } +} + +/// returns true or false indicating a notion of "sign" for quintic_ext. 
+/// This is used to canonicalize the square root +/// This is an implementation of the function sgn0 from the IRTF's hash-to-curve document +/// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-07#name-the-sgn0-function +pub(crate) fn quintic_ext_sgn0>(x: QuinticExtension) -> bool { + let mut sign = false; + let mut zero = true; + for &limb in x.0.iter() { + let sign_i = limb.to_canonical_u64() & 1 == 0; + let zero_i = limb == F::ZERO; + sign = sign || (zero && sign_i); + zero = zero && zero_i; + } + return sign; +} + +// returns the "canoncal" square root of x, if it exists +// the "canonical" square root is the one such that `sgn0(sqrt(x)) == true` +pub(crate) fn canonical_sqrt_quintic_ext_goldilocks(x: GFp5) -> Option { + match sqrt_quintic_ext_goldilocks(x) { + Some(root_x) => { + if quintic_ext_sgn0(root_x) { + Some(-root_x) + } else { + Some(root_x) + } + } + None => None, + } +} + +/// returns `Some(sqrt(x))` if `x` is a square in the field, and `None` otherwise +/// basically copied from here: https://github.com/pornin/ecquintic_ext/blob/ce059c6d1e1662db437aecbf3db6bb67fe63c716/python/ecGFp5.py#L879 +pub(crate) fn sqrt_quintic_ext_goldilocks(x: GFp5) -> Option { + let v = x.exp_power_of_2(31); + let d = x * v.exp_power_of_2(32) * v.try_inverse().unwrap_or(GFp5::ZERO); + let e = (d * d.repeated_frobenius(2)).frobenius(); + let f = e.square(); + + let [x0, x1, x2, x3, x4] = x.0; + let [f0, f1, f2, f3, f4] = f.0; + let g = x0 * f0 + GFp::from_canonical_u64(3) * (x1 * f4 + x2 * f3 + x3 * f2 + x4 * f1); + + g.sqrt().map(|s| e.inverse_or_zero() * s.into()) +} + +#[cfg(test)] +mod tests { + use plonky2_field::types::Sample; + use rand::thread_rng; + + use super::*; + use crate::curve::{base_field::SquareRoot, test_utils::gfp5_random_non_square}; + + #[test] + fn test_legendre() { + // test zero + assert_eq!(GFp::ZERO, GFp5::ZERO.legendre()); + + // test non-squares + for _ in 0..32 { + let x = gfp5_random_non_square(); + let legendre_sym = x.legendre(); + + assert_eq!(legendre_sym, -GFp::ONE); + } + + // test squares + for _ in 0..32 { + let x = GFp5::sample(&mut thread_rng()); + let square = x * x; + let legendre_sym = square.legendre(); + + assert_eq!(legendre_sym, GFp::ONE); + } + + // test zero + let x = GFp5::ZERO; + let square = x * x; + let legendre_sym = square.legendre(); + assert_eq!(legendre_sym, GFp::ZERO); + } + + #[test] + fn test_sqrt_quintic_ext_outside_circuit() { + let mut rng = thread_rng(); + + for _ in 0..30 { + let x = GFp5::sample(&mut rng); + let square = x * x; + let sqrt = SquareRoot::sqrt(&square).unwrap(); + + assert_eq!(sqrt * sqrt, square); + } + } + + #[test] + fn test_canonical_sqrt_quintic_ext_outside_circuit() { + let mut rng = thread_rng(); + + for _ in 0..30 { + let x = GFp5::sample(&mut rng); + let square = x * x; + let sqrt = square.canonical_sqrt().unwrap(); + + assert_eq!(sqrt * sqrt, square); + assert!(!sqrt.sgn0()) + } + } +} diff --git a/ecgfp5/src/curve/curve.rs b/ecgfp5/src/curve/curve.rs new file mode 100644 index 0000000000..7b1f6cca0f --- /dev/null +++ b/ecgfp5/src/curve/curve.rs @@ -0,0 +1,1762 @@ +/// EcGFp5 curve implementation +/// Most of this is copied from Thomas Pornin's implementation +/// with some modifications to make it play more nicely with plonky2 primitives +/// His implementation can be found here: https://github.com/pornin/ecgfp5 +use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; + +use alloc::vec::Vec; +use plonky2_field::{ + extension::{quintic::QuinticExtension, FieldExtension}, + 
goldilocks_field::GoldilocksField, + ops::Square, + types::{Field, PrimeField64, Sample}, +}; +use rand::RngCore; + +use crate::curve::{ + base_field::{Legendre, SquareRoot}, + mul_table::*, + scalar_field::Scalar, + GFp, GFp5, +}; + +use super::base_field::InverseOrZero; + +/// A curve point. +#[derive(Clone, Copy, Debug)] +pub struct Point { + // Internally, we use the (x,u) fractional coordinates: for curve + // point (x,y), we have (x,u) = (x,x/y) = (X/Z,U/T) (for the neutral + // N, the u coordinate is 0). + pub x: GFp5, + z: GFp5, + u: GFp5, + t: GFp5, +} + +/// A curve point in affine (x,u) coordinates. This is used internally +/// to make "windows" that speed up point multiplications. +#[derive(Clone, Copy, Debug)] +pub(crate) struct AffinePoint { + pub(crate) x: GFp5, + pub(crate) u: GFp5, +} + +/// A curve point in short Weirstrass form (x, y). This is used by the in-circuit representation +#[derive(Clone, Copy, Debug)] +pub struct WeierstrassPoint { + pub(crate) x: GFp5, + pub(crate) y: GFp5, + pub(crate) is_inf: bool, +} + +impl WeierstrassPoint { + // curve equation `A` constants when in short Weierstrass form + pub const A: GFp5 = QuinticExtension([ + GoldilocksField(6148914689804861439), + GoldilocksField(263), + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + ]); + + pub const B: GFp5 = QuinticExtension([ + GoldilocksField(15713893096167979237), + GoldilocksField(6148914689804861265), + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + ]); + + pub const NEUTRAL: Self = Self { x: GFp5::ZERO, y: GFp5::ZERO, is_inf: true }; + + pub const GENERATOR: Self = Self { + x: QuinticExtension([ + GoldilocksField(11712523173042564207), + GoldilocksField(14090224426659529053), + GoldilocksField(13197813503519687414), + GoldilocksField(16280770174934269299), + GoldilocksField(15998333998318935536), + ]), + + y: QuinticExtension([ + GoldilocksField(14639054205878357578), + GoldilocksField(17426078571020221072), + GoldilocksField(2548978194165003307), + GoldilocksField(8663895577921260088), + GoldilocksField(9793640284382595140), + ]), + is_inf: false, + }; + + pub fn encode(&self) -> GFp5 { + self.y / (Point::A / GFp5::from_canonical_u16(3) - self.x) + } + + pub fn decode(w: GFp5) -> Option { + let e = w.square() - Point::A; + let delta = e.square() - Point::B_MUL4; + let r = delta.canonical_sqrt(); + let c = r.is_some(); + let r = r.unwrap_or(GFp5::ZERO); + + let x1 = (e + r) / GFp5::TWO; + let x2 = (e - r) / GFp5::TWO; + + let x = if x1.legendre() == GFp::ONE { x1 } else { x2 }; + + let y = -w * x; + let x = if c { x + Point::A / GFp5::from_canonical_u16(3) } else { GFp5::ZERO }; + let is_inf = !c; + + // If w == 0 then this is in fact a success. + if c || w == GFp5::ZERO { + Some(WeierstrassPoint { x, y, is_inf }) + } else { + None + } + } +} + +impl PartialEq for WeierstrassPoint { + fn eq(&self, other: &Self) -> bool { + if self.is_inf && other.is_inf { + true + } else { + self.x == other.x && self.y == other.y + } + } +} + +impl Eq for WeierstrassPoint {} + +impl Sample for Point { + fn sample(rng: &mut R) -> Self + where + R: RngCore + ?Sized, + { + let s = Scalar::sample(rng); + Point::GENERATOR * s + } +} + +impl Point { + // Curve equation 'a' constant. 
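The in-circuit representation uses the short Weierstrass form above, but it shares the encoding convention of the fractional representation: w = 4 encodes the conventional generator and w = 0 the point at infinity. A small usage sketch in the style of this module's own tests (it assumes the crate-internal `GFp5` and `WeierstrassPoint` names are in scope and that plonky2's `Field` trait is imported):

fn weierstrass_decode_demo() {
    // w = 4 is the encoding of the conventional generator.
    let w = GFp5::from_canonical_u16(4);
    let g = WeierstrassPoint::decode(w).expect("w = 4 decodes to a valid point");
    assert_eq!(g, WeierstrassPoint::GENERATOR);

    // w = 0 encodes the neutral; two points at infinity compare equal
    // regardless of their stored coordinates.
    let n = WeierstrassPoint::decode(GFp5::ZERO).expect("w = 0 decodes to the neutral");
    assert_eq!(n, WeierstrassPoint::NEUTRAL);
}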
+ pub(crate) const A: GFp5 = + QuinticExtension([GFp::TWO, GFp::ZERO, GFp::ZERO, GFp::ZERO, GFp::ZERO]); + pub const B1: u64 = 263; + + pub(crate) const B: GFp5 = + QuinticExtension([GFp::ZERO, GoldilocksField(Self::B1), GFp::ZERO, GFp::ZERO, GFp::ZERO]); + + // 2*b + pub(crate) const B_MUL2: GFp5 = QuinticExtension([ + GFp::ZERO, + GoldilocksField(2 * Self::B1), + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + ]); + // 4*b + pub(crate) const B_MUL4: GFp5 = QuinticExtension([ + GFp::ZERO, + GoldilocksField(4 * Self::B1), + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + ]); + // 16*b + pub(crate) const B_MUL16: GFp5 = QuinticExtension([ + GFp::ZERO, + GoldilocksField(16 * Self::B1), + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + ]); + + /// The neutral point (neutral of the group law). + pub const NEUTRAL: Self = Self { x: GFp5::ZERO, z: GFp5::ONE, u: GFp5::ZERO, t: GFp5::ONE }; + + /// The conventional generator (corresponding to encoding w = 4). + pub const GENERATOR: Self = Self { + x: QuinticExtension([ + GoldilocksField(12883135586176881569), + GoldilocksField(4356519642755055268), + GoldilocksField(5248930565894896907), + GoldilocksField(2165973894480315022), + GoldilocksField(2448410071095648785), + ]), + z: GFp5::ONE, + u: QuinticExtension([GoldilocksField(1), GFp::ZERO, GFp::ZERO, GFp::ZERO, GFp::ZERO]), + t: QuinticExtension([GoldilocksField(4), GFp::ZERO, GFp::ZERO, GFp::ZERO, GFp::ZERO]), + }; + + pub fn is_x_zero(&self) -> bool { + self.x == GFp5::ZERO + } + + /// Encode this point into a field element. Encoding is always + /// canonical. + pub fn encode(self) -> GFp5 { + // Encoded form is the value w = 1/u. GFpor the neutral (u == 0), + // the encoded form is 0. Since our inversion over GF(p^5) already + // yields 0 in that case, there is no need for any special code. + self.t * self.u.inverse_or_zero() + } + + /// Little endian + pub fn to_le_bytes(self) -> [u8; 40] { + let gfp5: [GFp; 5] = self.encode().to_basefield_array(); + gfp5.iter() + .fold(vec![], |mut acc, gfp| { + let bytes = gfp.to_canonical_u64().to_le_bytes(); + acc.extend_from_slice(&bytes); + acc + }) + .try_into() + .unwrap() + } + + /// Test whether a field element can be decoded into a point. + /// returns `true` if decoding would work, `false` otherwise. + pub fn validate(w: GFp5) -> bool { + // Value w can be decoded if and only if it is zero, or + // (w^2 - a)^2 - 4*b is a quadratic residue. + let e = w.square() - Self::A; + let delta = e.square() - Self::B_MUL4; + w == GFp5::ZERO || delta.legendre() == GFp::ONE + } + + /// Little endian + pub fn from_le_bytes(buf: [u8; 40]) -> Option { + let gfp5: [GFp; 5] = (0..5) + .map(|i| { + GFp::from_canonical_u64(u64::from_le_bytes( + buf[i * 8..(i + 1) * 8].try_into().unwrap(), + )) + }) + .collect::>() + .try_into() + .unwrap(); + let w = GFp5::from_basefield_array(gfp5); + if Self::validate(w) { + Self::decode(w) + } else { + None + } + } + + /// Attempt to decode a point from a field element + pub fn decode(w: GFp5) -> Option { + // Curve equation is y^2 = x*(x^2 + a*x + b); encoded value + // is w = y/x. Dividing by x, we get the equation: + // x^2 - (w^2 - a)*x + b = 0 + // We solve for x and keep the solution which is not itself a + // square (if there are solutions, exactly one of them will be + // a square, and the other will not be a square). 
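Encoding, validation and the 40-byte serialization above all go through the same canonical field element w = t/u (zero for the neutral). A short round-trip sketch mirroring `test_to_from_le_bytes` and `test_decode_random` further down, under the same assumption that the crate-internal names are in scope:

fn encoding_roundtrip_demo() {
    let p = Point::GENERATOR;

    // `validate` answers "would decode succeed?" without performing the decode.
    let w = p.encode();
    assert!(Point::validate(w));
    assert!(Point::decode(w).expect("a valid encoding always decodes") == p);

    // The little-endian byte form is just the five Goldilocks limbs of w, 8 bytes each.
    let bytes: [u8; 40] = p.to_le_bytes();
    assert!(Point::from_le_bytes(bytes).expect("round trip succeeds") == p);
}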
+ + let e = w.square() - Self::A; + let delta = e.square() - Self::B_MUL4; + let r = delta.canonical_sqrt(); + let c = r.is_some(); + let r = r.unwrap_or(GFp5::ZERO); + + let x1 = (e + r) / GFp5::TWO; + let x2 = (e - r) / GFp5::TWO; + let x = if x1.legendre() == GFp::ONE { x2 } else { x1 }; + + // If c == true (delta is not a sqrt) then we want to get the neutral here; note that if + // w == 0, then delta = a^2 - 4*b, which is not a square, and + // thus we also get c == 0. + let x = if c { x } else { GFp5::ZERO }; + let z = GFp5::ONE; + let u = if c { GFp5::ONE } else { GFp5::ZERO }; + let t = if c { w } else { GFp5::ONE }; + + // If w == 0 then this is in fact a success. + if c || w == GFp5::ZERO { + Some(Self { x, z, u, t }) + } else { + None + } + } + + // TODO: this can be better + pub fn to_weierstrass(&self) -> WeierstrassPoint { + let w = self.encode(); + WeierstrassPoint::decode(w).unwrap() + } + + pub fn to_hex_string(&self) -> String { + let mut buf: [u8; 5 * 8] = [0; 40]; + let dst_ptr = buf.as_mut_ptr(); + + let mut offset = 0; + + let encode = Point::encode(*self); + for e in encode.0 { + let bytes = e.to_canonical_u64().to_le_bytes(); + unsafe { + std::ptr::copy_nonoverlapping(bytes.as_ptr(), dst_ptr.add(offset), 8); + } + offset = offset + 8; + } + + let hex_string = hex::encode(&buf); + hex_string + } + + pub fn from_hex_string(input_hex_string: &str) -> Self { + let buf: Vec = hex::decode(input_hex_string).unwrap(); + let mut data: [GoldilocksField; 5] = [GoldilocksField::ZERO; 5]; + + let src_ptr = buf.as_ptr(); + let mut offset = 0; + for ele in data.iter_mut() { + unsafe { + let mut v_buf: [u8; 8] = [0; 8]; + std::ptr::copy_nonoverlapping(src_ptr.add(offset), v_buf.as_mut_ptr(), 8); + let v: u64 = u64::from_le_bytes(v_buf); + *ele = GoldilocksField::from_canonical_u64(v); + } + offset = offset + 8; + } + + let quintic = QuinticExtension::(data); + let decoded = Self::decode(quintic).unwrap(); + decoded + } + + // General point addition. formulas are complete (no special case). + fn set_add(&mut self, rhs: &Self) { + // cost: 10M + let (x1, z1, u1, _t1) = (self.x, self.z, self.u, self.t); + let (x2, z2, u2, _t2) = (rhs.x, rhs.z, rhs.u, rhs.t); + + let t1 = x1 * x2; + let t2 = z1 * z2; + let t3 = u1 * u2; + let t4 = _t1 * _t2; + let t5 = (x1 + z1) * (x2 + z2) - t1 - t2; + let t6 = (u1 + _t1) * (u2 + _t2) - t3 - t4; + let t7 = t1 + t2 * Self::B; + let t8 = t4 * t7; + let t9 = t3 * (t5 * Self::B_MUL2 + t7.double()); + let t10 = (t4 + t3.double()) * (t5 + t7); + + self.x = (t10 - t8) * Self::B; + self.z = t8 - t9; + self.u = t6 * ((t2 * Self::B) - t1); + self.t = t8 + t9; + } + + // Add a point in affine coordinates to this one. + fn set_add_affine(&mut self, rhs: &AffinePoint) { + // cost: 8M + let (x1, z1, u1, _t1) = (self.x, self.z, self.u, self.t); + let (x2, u2) = (rhs.x, rhs.u); + + let t1 = x1 * x2; + let t2 = z1; + let t3 = u1 * u2; + let t4 = _t1; + let t5 = x1 + x2 * z1; + let t6 = u1 + u2 * _t1; + let t7 = t1 + t2 * Self::B; + let t8 = t4 * t7; + let t9 = t3 * (t5 * Self::B_MUL2 + t7.double()); + let t10 = (t4 + t3.double()) * (t5 + t7); + + self.x = (t10 - t8) * Self::B; + self.u = t6 * (t2 * Self::B - t1); + self.z = t8 - t9; + self.t = t8 + t9; + } + + // Subtract a point in affine coordinates from this one. 
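Both addition routines above are complete formulas in the fractional (x, u) coordinates: the general case costs 10 multiplications, the mixed affine case 8, and neither needs to special-case the neutral or a doubling. A short sketch of what that completeness means at the API level (same crate-internal assumptions as the previous sketches):

fn complete_addition_demo() {
    let g = Point::GENERATOR;
    let n = Point::NEUTRAL;

    // The neutral and the doubling case flow through the same formulas as any other sum.
    assert!(g + n == g);
    assert!(n + g == g);
    assert!(g + g == g.double());
    assert!((g + g) - g == g);
}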
+ fn set_sub_affine(&mut self, rhs: &AffinePoint) { + self.set_add_affine(&AffinePoint { x: rhs.x, u: -rhs.u }) + } + + fn set_neg(&mut self) { + self.u = -self.u; + } + + fn set_sub(&mut self, rhs: &Self) { + self.set_add(&rhs.neg()) + } + + /// Specialized point doubling function (faster than using general + /// addition on the point and itself). + pub fn double(self) -> Self { + let mut r = self; + r.set_double(); + r + } + + fn set_double(&mut self) { + // cost: 4M+5S + let (x, z, u, t) = (self.x, self.z, self.u, self.t); + + let t1 = z * t; + let t2 = t1 * t; + let x1 = t2.square(); + let z1 = t1 * u; + let t3 = u.square(); + let w1 = t2 - (x + z).double() * t3; + let t4 = z1.square(); + + self.x = t4 * Self::B_MUL4; + self.z = w1.square(); + self.u = (w1 + z1).square() - t4 - self.z; + self.t = x1.double() - t4 * GFp5::from_canonical_u64(4) - self.z; + } + + /// Multiply this point by 2^n (i.e. n successive doublings). This is + /// faster than calling the double() function n times. + pub fn mdouble(self, n: u32) -> Self { + let mut r = self; + r.set_mdouble(n); + r + } + + fn set_mdouble(&mut self, n: u32) { + // Handle corner cases (0 or 1 double). + if n == 0 { + return; + } + if n == 1 { + self.set_double(); + return; + } + + // cost: n*(2M+5S) + 2M+1S + let (x0, z0, u0, t0) = (self.x, self.z, self.u, self.t); + let mut t1 = z0 * t0; + let mut t2 = t1 * t0; + let x1 = t2.square(); + let z1 = t1 * u0; + let mut t3 = u0.square(); + let mut w1 = t2 - (x0 + z0).double() * t3; + let mut t4 = w1.square(); + let mut t5 = z1.square(); + let mut x = t5.square() * Self::B_MUL16; + let mut w = x1.double() - t5 * GFp5::from_canonical_u16(4) - t4; + let mut z = (w1 + z1).square() - t4 - t5; + + for _ in 2..n { + t1 = z.square(); + t2 = t1.square(); + t3 = w.square(); + t4 = t3.square(); + t5 = (w + z).square() - t1 - t3; + z = t5 * ((x + t1).double() - t3); + x = (t2 * t4) * Self::B_MUL16; + w = -t4 - t2 * (Self::B_MUL4 - GFp5::from_canonical_u16(4)); + } + + t1 = w.square(); + t2 = z.square(); + t3 = (w + z).square() - t1 - t2; + w1 = t1 - (x + t2).double(); + self.x = t3.square() * Self::B; + self.z = w1.square(); + self.u = t3 * w1; + self.t = t1.double() * (t1 - t2.double()) - self.z; + } + + /// Return `true` if this point is the neutral, `false` otherwise. + pub fn is_neutral(self) -> bool { + self.u == GFp5::ZERO + } + + /// Compare this point with another + /// return `true` if they're equal`, `false` otherwise + pub fn equals(self, rhs: Self) -> bool { + self.u * rhs.t == rhs.u * self.t + } + + // Convert points to affine coordinates. + pub(crate) fn batch_to_affine(src: &[Self]) -> Vec { + // We use a trick due to Montgomery: to compute the inverse of + // x and of y, a single inversion suffices, with: + // 1/x = y*(1/(x*y)) + // 1/y = x*(1/(x*y)) + // This extends to the case of inverting n values, with a total + // cost of 1 inversion and 3*(n-1) multiplications. + match src.len() { + 0 => Vec::new(), + 1 => { + let p = src[0]; + let m1 = (p.z * p.t).inverse_or_zero(); + let res = AffinePoint { x: p.x * p.t * m1, u: p.u * p.z * m1 }; + + vec![res] + } + n => { + let mut res = vec![AffinePoint::NEUTRAL; n]; + // Compute product of all values to invert, and invert it. + // We also use the x and u coordinates of the points in the + // destination slice to keep track of the partial products. 
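The comment above describes Montgomery's batch-inversion trick used by `batch_to_affine`: a single field inversion plus a linear number of multiplications inverts every entry of the slice. A self-contained sketch of the same trick over a toy prime field (plain integers, independent of the crate; inversion of the running product uses Fermat's little theorem):

fn batch_invert_mod_p(values: &[u64], p: u64) -> Vec<u64> {
    let mul = |a: u64, b: u64| ((a as u128 * b as u128) % p as u128) as u64;
    // One inversion in total: a^(p-2) mod p by square-and-multiply.
    let inv = |mut base: u64| {
        let (mut e, mut acc) = (p - 2, 1u64);
        while e > 0 {
            if e & 1 == 1 {
                acc = mul(acc, base);
            }
            base = mul(base, base);
            e >>= 1;
        }
        acc
    };

    // Forward pass: prefix[i] holds values[0] * ... * values[i-1].
    let mut prefix = vec![1u64; values.len() + 1];
    for (i, &v) in values.iter().enumerate() {
        prefix[i + 1] = mul(prefix[i], v);
    }

    // Invert the total product once, then peel one factor off per step going backwards.
    let mut m = inv(prefix[values.len()]);
    let mut out = vec![0u64; values.len()];
    for i in (0..values.len()).rev() {
        out[i] = mul(m, prefix[i]);
        m = mul(m, values[i]);
    }
    out
}

fn main() {
    let p: u64 = 65_537;
    for (v, v_inv) in [3u64, 5, 7, 11].iter().zip(batch_invert_mod_p(&[3, 5, 7, 11], p)) {
        assert_eq!(v * v_inv % p, 1);
    }
}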
+ let mut m = src[0].z * src[0].t; + for i in 1..n { + let x = m; + m *= src[i].z; + let u = m; + m *= src[i].t; + + res[i] = AffinePoint { x, u }; + } + + m = m.inverse_or_zero(); + + // Propagate back inverses. + for i in (1..n).rev() { + res[i].u = src[i].u * res[i].u * m; + m *= src[i].t; + res[i].x = src[i].x * res[i].x * m; + m *= src[i].z; + } + res[0].u = src[0].u * src[0].z * m; + m *= src[0].t; + res[0].x = src[0].x * m; + + res + } + } + } + + // Optimal window size should be 4 or 5 bits, depending on target + // architecture. On an Intel i5-8259U ("Coffee Lake" core), a 5-bit + // window seems very slightly better. + const WINDOW: usize = 5; + const WIN_SIZE: usize = 1 << ((Self::WINDOW - 1) as i32); + + fn make_window_affine(self) -> Vec { + let mut tmp = [Self::NEUTRAL; Self::WIN_SIZE]; + tmp[0] = self; + for i in 1..Self::WIN_SIZE { + if (i & 1) == 0 { + tmp[i] = self.add(tmp[i - 1]); + } else { + tmp[i] = tmp[i >> 1].double(); + } + } + + let win = Self::batch_to_affine(&tmp); + win + } + + // Multiply this point by a scalar. + fn set_mul(&mut self, s: &Scalar) { + // Make a window with affine points. + let win = self.make_window_affine(); + let mut digits = [0; (319 + Self::WINDOW) / Self::WINDOW]; + s.recode_signed(&mut digits, Self::WINDOW as i32); + + *self = AffinePoint::lookup_vartime(&win, *digits.last().unwrap()).to_point(); + for &digit in digits.iter().rev().skip(1) { + self.set_mdouble(Self::WINDOW as u32); + *self += AffinePoint::lookup(&win, digit); + } + } + + /// Multiply the conventional generator by a scalar. + /// This function is faster than using the multiplication operator + /// on the generator point. + pub fn mulgen(s: Scalar) -> Self { + let mut digits = [0i32; 64]; + s.recode_signed(&mut digits, 5); + let mut p = AffinePoint::lookup(&MUL_TABLE_G0, digits[7]).to_point(); + + p += AffinePoint::lookup(&MUL_TABLE_G40, digits[15]); + p += AffinePoint::lookup(&MUL_TABLE_G80, digits[23]); + p += AffinePoint::lookup(&MUL_TABLE_G120, digits[31]); + p += AffinePoint::lookup(&MUL_TABLE_G160, digits[39]); + p += AffinePoint::lookup(&MUL_TABLE_G200, digits[47]); + p += AffinePoint::lookup(&MUL_TABLE_G240, digits[55]); + p += AffinePoint::lookup(&MUL_TABLE_G280, digits[63]); + for i in (0..7).rev() { + p.set_mdouble(5); + p += AffinePoint::lookup(&MUL_TABLE_G0, digits[i]); + p += AffinePoint::lookup(&MUL_TABLE_G40, digits[i + 8]); + p += AffinePoint::lookup(&MUL_TABLE_G80, digits[i + 16]); + p += AffinePoint::lookup(&MUL_TABLE_G120, digits[i + 24]); + p += AffinePoint::lookup(&MUL_TABLE_G160, digits[i + 32]); + p += AffinePoint::lookup(&MUL_TABLE_G200, digits[i + 40]); + p += AffinePoint::lookup(&MUL_TABLE_G240, digits[i + 48]); + p += AffinePoint::lookup(&MUL_TABLE_G280, digits[i + 56]); + } + p + } + + fn make_window_5(self) -> [Self; 16] { + let mut win = [Self::NEUTRAL; 16]; + win[0] = self; + for i in 1..win.len() { + if (i & 1) == 0 { + win[i] = self.add(win[i - 1]); + } else { + win[i] = win[i >> 1].double(); + } + } + win + } + + fn lookup_vartime(win: &[Self], k: i32) -> Self { + if k > 0 { + return win[(k - 1) as usize]; + } else if k == 0 { + return Self::NEUTRAL; + } else { + return -win[(-k - 1) as usize]; + } + } + + /// Given scalars s and k, and point R, verify whether s*G + k*Q = R + /// (with G being the curve conventional generator, and Q this instance). + /// This is the main operation in Schnorr signature verification. + /// WARNING: this function is not constant-time; use only on + /// public data. 
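`set_mul` and `mulgen` above both start by recoding the scalar into signed base-2^5 digits, so that each 5-bit window only needs `WIN_SIZE = 16` precomputed multiples plus a cheap negation for negative digits. A self-contained sketch of one common signed-digit recoding (plain integers; the crate's `recode_signed` produces a fixed number of digits and a slightly different digit range, so this illustrates the idea rather than its exact behaviour):

/// Recode a non-negative n into signed digits d_i with -16 <= d_i <= 15
/// such that n = sum_i d_i * 32^i.
fn recode_signed_base32(mut n: i128) -> Vec<i32> {
    let mut digits = Vec::new();
    while n != 0 {
        let mut d = (n % 32) as i32; // in 0..=31 for non-negative n
        if d > 15 {
            d -= 32; // fold into the signed range; the subtraction carries into the next digit
        }
        n = (n - d as i128) / 32;
        digits.push(d);
    }
    digits
}

fn main() {
    let n: i128 = 123_456_789;
    let digits = recode_signed_base32(n);
    let rebuilt: i128 = digits.iter().rev().fold(0, |acc, &d| acc * 32 + d as i128);
    assert_eq!(rebuilt, n);
    assert!(digits.iter().all(|&d| (-16..16).contains(&d)));
}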
+ pub fn verify_muladd_vartime(self, s: Scalar, k: Scalar, r: Self) -> bool { + // We use a method by Antipa et al (SAC 2005), following the + // description in: https://eprint.iacr.org/2020/454 + // We split k into two (signed) integers c0 and c1 such + // that k = c0/c1 mod n; the integers c0 and c1 fit on 161 bits + // each (including the signed bit). The verification is then: + // (s*c1)*G + c0*Q - c1*R = 0 + // We split s*c1 into two 160-bit halves, and use the precomputed + // tables for G; thus, all scalars fit on 160 bits (+sign). + // + // Since formulas for multiple doublings favour long runs of + // doublings, we do not use a wNAF representation; instead, we + // make regular 5-bit (signed) windows. + // + // We use fractional coordinates for the Q and R windows; it is + // not worth it converting them to affine. + + // Compute c0 and c1. + let (c0, c1) = k.lagrange(); + + // Compute t <- s*c1. + let t = s * c1.to_scalar_vartime(); + + // Recode multipliers. + let mut tt = [0i32; 64]; + t.recode_signed(&mut tt, 5); + let tt0 = &tt[..32]; + let tt1 = &tt[32..]; + let ss0 = c0.recode_signed_5(); + let ss1 = c1.recode_signed_5(); + + // Make windows for this point (Q) and for -R. + let win_q = self.make_window_5(); + let win_r = (-r).make_window_5(); + + let mut p = Self::lookup_vartime(&win_q, ss0[32]); + if ss1[32] != 0 { + p += Self::lookup_vartime(&win_r, ss1[32]); + } + for i in (0..32).rev() { + p.set_mdouble(5); + if tt0[i] != 0 { + p += AffinePoint::lookup_vartime(&MUL_TABLE_G0, tt0[i]); + } + if tt1[i] != 0 { + p += AffinePoint::lookup_vartime(&MUL_TABLE_G160, tt1[i]); + } + if ss0[i] != 0 { + p += Self::lookup_vartime(&win_q, ss0[i]); + } + if ss1[i] != 0 { + p += Self::lookup_vartime(&win_r, ss1[i]); + } + } + + p == Self::NEUTRAL + } +} + +impl AffinePoint { + pub(crate) const NEUTRAL: Self = Self { x: GFp5::ZERO, u: GFp5::ZERO }; + + fn to_point(self) -> Point { + let Self { x, u } = self; + Point { x, z: GFp5::ONE, u, t: GFp5::ONE } + } + + fn set_neg(&mut self) { + self.u = -self.u; + } + + // Lookup a point in a window. The win[] slice must contain values + // i*P for i = 1 to n (win[0] contains P, win[1] contains 2*P, and + // so on). Index value k is an integer in the -n to n range; returned + // point is k*P. + fn set_lookup(&mut self, win: &[Self], k: i32) { + // sign = 0xFFFFFFFF if k < 0, 0x00000000 otherwise + let sign = (k >> 31) as u32; + // ka = abs(k) + let ka = ((k as u32) ^ sign).wrapping_sub(sign); + // km1 = ka - 1 + let km1 = ka.wrapping_sub(1); + + let mut x = GFp5::ZERO; + let mut u = GFp5::ZERO; + for i in 0..win.len() { + let m = km1.wrapping_sub(i as u32); + let c = (((m | m.wrapping_neg()) >> 31) as u64).wrapping_sub(1); + x = if c == 0 { x } else { win[i].x }; + u = if c == 0 { u } else { win[i].u }; + } + + // If k < 0, then we must negate the point. + let c = (sign as u64) | ((sign as u64) << 32); + self.x = x; + self.u = u; + + if c != 0 { + self.u = -self.u; + } + } + + fn lookup(win: &[Self], k: i32) -> Self { + let mut r = Self::NEUTRAL; + r.set_lookup(win, k); + r + } + + // Same as lookup(), except this implementation is variable-time. + fn lookup_vartime(win: &[Self], k: i32) -> Self { + if k > 0 { + return win[(k - 1) as usize]; + } else if k == 0 { + return Self::NEUTRAL; + } else { + return -win[(-k - 1) as usize]; + } + } +} + +// We implement all the needed traits to allow use of the arithmetic +// operators on points. We support all combinations of operands +// either as Point structures, or pointers to Point structures. 
Some +// operations with AffinePoint structures are also implemented. + +impl Add for Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: Point) -> Point { + let mut r = self; + r.set_add(&other); + r + } +} + +impl Add<&Point> for Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: &Point) -> Point { + let mut r = self; + r.set_add(other); + r + } +} + +impl Add for &Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: Point) -> Point { + let mut r = *self; + r.set_add(&other); + r + } +} + +impl Add<&Point> for &Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: &Point) -> Point { + let mut r = *self; + r.set_add(other); + r + } +} + +impl Add for Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: AffinePoint) -> Point { + let mut r = self; + r.set_add_affine(&other); + r + } +} + +impl Add<&AffinePoint> for Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: &AffinePoint) -> Point { + let mut r = self; + r.set_add_affine(other); + r + } +} + +impl Add for &Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: AffinePoint) -> Point { + let mut r = *self; + r.set_add_affine(&other); + r + } +} + +impl Add<&AffinePoint> for &Point { + type Output = Point; + + #[inline(always)] + fn add(self, other: &AffinePoint) -> Point { + let mut r = *self; + r.set_add_affine(other); + r + } +} + +impl Add for AffinePoint { + type Output = Point; + + #[inline(always)] + fn add(self, other: Point) -> Point { + let mut r = other; + r.set_add_affine(&self); + r + } +} + +impl Add<&Point> for AffinePoint { + type Output = Point; + + #[inline(always)] + fn add(self, other: &Point) -> Point { + let mut r = *other; + r.set_add_affine(&self); + r + } +} + +impl Add for &AffinePoint { + type Output = Point; + + #[inline(always)] + fn add(self, other: Point) -> Point { + let mut r = other; + r.set_add_affine(self); + r + } +} + +impl Add<&Point> for &AffinePoint { + type Output = Point; + + #[inline(always)] + fn add(self, other: &Point) -> Point { + let mut r = *other; + r.set_add_affine(self); + r + } +} + +impl AddAssign for Point { + #[inline(always)] + fn add_assign(&mut self, other: Point) { + self.set_add(&other); + } +} + +impl AddAssign<&Point> for Point { + #[inline(always)] + fn add_assign(&mut self, other: &Point) { + self.set_add(other); + } +} + +impl AddAssign for Point { + #[inline(always)] + fn add_assign(&mut self, other: AffinePoint) { + self.set_add_affine(&other); + } +} + +impl AddAssign<&AffinePoint> for Point { + #[inline(always)] + fn add_assign(&mut self, other: &AffinePoint) { + self.set_add_affine(other); + } +} + +impl Sub for Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: Point) -> Point { + let mut r = self; + r.set_sub(&other); + r + } +} + +impl Sub<&Point> for Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: &Point) -> Point { + let mut r = self; + r.set_sub(other); + r + } +} + +impl Sub for &Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: Point) -> Point { + let mut r = *self; + r.set_sub(&other); + r + } +} + +impl Sub<&Point> for &Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: &Point) -> Point { + let mut r = *self; + r.set_sub(other); + r + } +} + +impl Sub for Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: AffinePoint) -> Point { + let mut r = self; + r.set_sub_affine(&other); + r + 
} +} + +impl Sub<&AffinePoint> for Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: &AffinePoint) -> Point { + let mut r = self; + r.set_sub_affine(other); + r + } +} + +impl Sub for &Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: AffinePoint) -> Point { + let mut r = *self; + r.set_sub_affine(&other); + r + } +} + +impl Sub<&AffinePoint> for &Point { + type Output = Point; + + #[inline(always)] + fn sub(self, other: &AffinePoint) -> Point { + let mut r = *self; + r.set_sub_affine(other); + r + } +} + +impl Sub for AffinePoint { + type Output = Point; + + #[inline(always)] + fn sub(self, other: Point) -> Point { + let mut r = other; + r.set_sub_affine(&self); + r + } +} + +impl Sub<&Point> for AffinePoint { + type Output = Point; + + #[inline(always)] + fn sub(self, other: &Point) -> Point { + let mut r = *other; + r.set_sub_affine(&self); + r + } +} + +impl Sub for &AffinePoint { + type Output = Point; + + #[inline(always)] + fn sub(self, other: Point) -> Point { + let mut r = other; + r.set_sub_affine(self); + r + } +} + +impl Sub<&Point> for &AffinePoint { + type Output = Point; + + #[inline(always)] + fn sub(self, other: &Point) -> Point { + let mut r = *other; + r.set_sub_affine(self); + r + } +} + +impl SubAssign for Point { + #[inline(always)] + fn sub_assign(&mut self, other: Point) { + self.set_sub(&other); + } +} + +impl SubAssign<&Point> for Point { + #[inline(always)] + fn sub_assign(&mut self, other: &Point) { + self.set_sub(other); + } +} + +impl SubAssign for Point { + #[inline(always)] + fn sub_assign(&mut self, other: AffinePoint) { + self.set_sub_affine(&other); + } +} + +impl SubAssign<&AffinePoint> for Point { + #[inline(always)] + fn sub_assign(&mut self, other: &AffinePoint) { + self.set_sub_affine(other); + } +} + +impl Neg for Point { + type Output = Point; + + #[inline(always)] + fn neg(self) -> Point { + let mut r = self; + r.set_neg(); + r + } +} + +impl Neg for &Point { + type Output = Point; + + #[inline(always)] + fn neg(self) -> Point { + let mut r = *self; + r.set_neg(); + r + } +} + +impl Neg for AffinePoint { + type Output = AffinePoint; + + #[inline(always)] + fn neg(self) -> AffinePoint { + let mut r = self; + r.set_neg(); + r + } +} + +impl Neg for &AffinePoint { + type Output = AffinePoint; + + #[inline(always)] + fn neg(self) -> AffinePoint { + let mut r = *self; + r.set_neg(); + r + } +} + +impl Mul for Point { + type Output = Point; + + #[inline(always)] + fn mul(self, other: Scalar) -> Point { + let mut r = self; + r.set_mul(&other); + r + } +} + +impl Mul<&Scalar> for Point { + type Output = Point; + + #[inline(always)] + fn mul(self, other: &Scalar) -> Point { + let mut r = self; + r.set_mul(other); + r + } +} + +impl Mul for &Point { + type Output = Point; + + #[inline(always)] + fn mul(self, other: Scalar) -> Point { + let mut r = *self; + r.set_mul(&other); + r + } +} + +impl Mul<&Scalar> for &Point { + type Output = Point; + + #[inline(always)] + fn mul(self, other: &Scalar) -> Point { + let mut r = *self; + r.set_mul(other); + r + } +} + +impl MulAssign for Point { + #[inline(always)] + fn mul_assign(&mut self, other: Scalar) { + self.set_mul(&other); + } +} + +impl MulAssign<&Scalar> for Point { + #[inline(always)] + fn mul_assign(&mut self, other: &Scalar) { + self.set_mul(other); + } +} + +impl Mul for Scalar { + type Output = Point; + + #[inline(always)] + fn mul(self, other: Point) -> Point { + let mut r = other; + r.set_mul(&self); + r + } +} + +impl Mul<&Point> for Scalar { + type 
Output = Point; + + #[inline(always)] + fn mul(self, other: &Point) -> Point { + let mut r = *other; + r.set_mul(&self); + r + } +} + +impl Mul for &Scalar { + type Output = Point; + + #[inline(always)] + fn mul(self, other: Point) -> Point { + let mut r = other; + r.set_mul(self); + r + } +} + +impl Mul<&Point> for &Scalar { + type Output = Point; + + #[inline(always)] + fn mul(self, other: &Point) -> Point { + let mut r = *other; + r.set_mul(self); + r + } +} + +impl PartialEq for Point { + #[inline(always)] + fn eq(&self, other: &Point) -> bool { + self.equals(*other) + } +} + +impl PartialEq<&Point> for Point { + #[inline(always)] + fn eq(&self, other: &&Point) -> bool { + self.equals(**other) + } +} + +impl PartialEq for &Point { + #[inline(always)] + fn eq(&self, other: &Point) -> bool { + self.equals(*other) + } +} + +impl Eq for Point {} + +#[cfg(test)] +mod tests { + use plonky2_field::{ + extension::quintic::QuinticExtension, + goldilocks_field::GoldilocksField, + types::{Field, Sample}, + }; + use rand::{thread_rng, Rng}; + + use crate::curve::{base_field::InverseOrZero, scalar_field::Scalar, GFp, GFp5}; + + use super::{AffinePoint, Point, WeierstrassPoint}; + + fn test_vectors() -> [GFp5; 8] { + // P0 is neutral of G. + // P1 is a random point in G (encoded as w1) + // P2 = e*P1 in G (encoded as w2) + // P3 = P1 + P2 (in G) (encoded as w3) + // P4 = 2*P1 (in G) (encoded as w4) + // P5 = 2*P2 (in G) (encoded as w5) + // P6 = 2*P1 + P2 (in G) (encoded as w6) + // P7 = P1 + 2*P2 (in G) (encoded as w7) + + let w0 = GFp5::ZERO; + let w1 = QuinticExtension([ + GoldilocksField(12539254003028696409), + GoldilocksField(15524144070600887654), + GoldilocksField(15092036948424041984), + GoldilocksField(11398871370327264211), + GoldilocksField(10958391180505708567), + ]); + let w2 = QuinticExtension([ + GoldilocksField(11001943240060308920), + GoldilocksField(17075173755187928434), + GoldilocksField(3940989555384655766), + GoldilocksField(15017795574860011099), + GoldilocksField(5548543797011402287), + ]); + let w3 = QuinticExtension([ + GoldilocksField(246872606398642312), + GoldilocksField(4900963247917836450), + GoldilocksField(7327006728177203977), + GoldilocksField(13945036888436667069), + GoldilocksField(3062018119121328861), + ]); + let w4 = QuinticExtension([ + GoldilocksField(8058035104653144162), + GoldilocksField(16041715455419993830), + GoldilocksField(7448530016070824199), + GoldilocksField(11253639182222911208), + GoldilocksField(6228757819849640866), + ]); + let w5 = QuinticExtension([ + GoldilocksField(10523134687509281194), + GoldilocksField(11148711503117769087), + GoldilocksField(9056499921957594891), + GoldilocksField(13016664454465495026), + GoldilocksField(16494247923890248266), + ]); + let w6 = QuinticExtension([ + GoldilocksField(12173306542237620), + GoldilocksField(6587231965341539782), + GoldilocksField(17027985748515888117), + GoldilocksField(17194831817613584995), + GoldilocksField(10056734072351459010), + ]); + let w7 = QuinticExtension([ + GoldilocksField(9420857400785992333), + GoldilocksField(4695934009314206363), + GoldilocksField(14471922162341187302), + GoldilocksField(13395190104221781928), + GoldilocksField(16359223219913018041), + ]); + + [w0, w1, w2, w3, w4, w5, w6, w7] + } + + #[test] + fn test_basic_ops() { + let [w0, w1, w2, w3, w4, w5, w6, w7] = test_vectors(); + + // Values that should not decode successfully. 
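The eight test vectors above are tied together exactly as the comments state (P2 = e*P1, P3 = P1 + P2, P4 = 2*P1, and so on). A hedged sketch of how such a table could be regenerated from the underlying point and scalar using only the operators defined earlier; `p1` and `e` are placeholders for the undisclosed values behind w1 and P2, not data taken from this diff:

// Hypothetical helper, mirroring the P0..P7 definitions listed in `test_vectors`.
fn related_encodings(p1: Point, e: Scalar) -> [GFp5; 8] {
    let p2 = p1 * e;
    [
        Point::NEUTRAL.encode(),     // w0: neutral
        p1.encode(),                 // w1: P1
        p2.encode(),                 // w2: P2 = e*P1
        (p1 + p2).encode(),          // w3: P1 + P2
        p1.double().encode(),        // w4: 2*P1
        p2.double().encode(),        // w5: 2*P2
        (p1.double() + p2).encode(), // w6: 2*P1 + P2
        (p1 + p2 + p2).encode(),     // w7: P1 + 2*P2
    ]
}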
+ let bww: [GFp5; 6] = [ + QuinticExtension([ + GoldilocksField(13557832913345268708), + GoldilocksField(15669280705791538619), + GoldilocksField(8534654657267986396), + GoldilocksField(12533218303838131749), + GoldilocksField(5058070698878426028), + ]), + QuinticExtension([ + GoldilocksField(135036726621282077), + GoldilocksField(17283229938160287622), + GoldilocksField(13113167081889323961), + GoldilocksField(1653240450380825271), + GoldilocksField(520025869628727862), + ]), + QuinticExtension([ + GoldilocksField(6727960962624180771), + GoldilocksField(17240764188796091916), + GoldilocksField(3954717247028503753), + GoldilocksField(1002781561619501488), + GoldilocksField(4295357288570643789), + ]), + QuinticExtension([ + GoldilocksField(4578929270179684956), + GoldilocksField(3866930513245945042), + GoldilocksField(7662265318638150701), + GoldilocksField(9503686272550423634), + GoldilocksField(12241691520798116285), + ]), + QuinticExtension([ + GoldilocksField(16890297404904119082), + GoldilocksField(6169724643582733633), + GoldilocksField(9725973298012340311), + GoldilocksField(5977049210035183790), + GoldilocksField(11379332130141664883), + ]), + QuinticExtension([ + GoldilocksField(13777379982711219130), + GoldilocksField(14715168412651470168), + GoldilocksField(17942199593791635585), + GoldilocksField(6188824164976547520), + GoldilocksField(15461469634034461986), + ]), + ]; + + assert!(Point::validate(w0)); + assert!(Point::validate(w1)); + assert!(Point::validate(w2)); + assert!(Point::validate(w3)); + assert!(Point::validate(w4)); + assert!(Point::validate(w5)); + assert!(Point::validate(w6)); + assert!(Point::validate(w7)); + + let p0 = Point::decode(w0).expect("w0 should successfully decode"); + let p1 = Point::decode(w1).expect("w1 should successfully decode"); + let p2 = Point::decode(w2).expect("w2 should successfully decode"); + let p3 = Point::decode(w3).expect("w3 should successfully decode"); + let p4 = Point::decode(w4).expect("w4 should successfully decode"); + let p5 = Point::decode(w5).expect("w5 should successfully decode"); + let p6 = Point::decode(w6).expect("w6 should successfully decode"); + let p7 = Point::decode(w7).expect("w7 should successfully decode"); + + assert!(p0.is_neutral()); + assert!(!p1.is_neutral()); + assert!(!p2.is_neutral()); + assert!(!p3.is_neutral()); + assert!(!p4.is_neutral()); + assert!(!p5.is_neutral()); + assert!(!p6.is_neutral()); + assert!(!p7.is_neutral()); + + assert_eq!(p0, p0); + assert_eq!(p1, p1); + assert_ne!(p0, p1); + assert_ne!(p1, p0); + assert_ne!(p1, p2); + + assert_eq!(p0.encode(), w0); + assert_eq!(p1.encode(), w1); + assert_eq!(p2.encode(), w2); + assert_eq!(p3.encode(), w3); + assert_eq!(p4.encode(), w4); + assert_eq!(p5.encode(), w5); + assert_eq!(p6.encode(), w6); + assert_eq!(p7.encode(), w7); + + for &w in bww.iter() { + assert!(!Point::validate(w)); + assert!(Point::decode(w).is_none()); + } + + assert_eq!((p1 + p2).encode(), w3); + assert_eq!((p1 + p1).encode(), w4); + assert_eq!(p2.double().encode(), w5); + assert_eq!((p1.double() + p2).encode(), w6); + assert_eq!((p1 + p2 + p2).encode(), w7); + + assert_eq!((p0.double()).encode(), GFp5::ZERO); + assert_eq!((p0 + p0).encode(), GFp5::ZERO); + assert_eq!((p0 + p1).encode(), w1); + assert_eq!((p1 + p0).encode(), w1); + + for i in 0..10 { + let q1 = p1.mdouble(i); + let mut q2 = p1; + for _ in 0..i { + q2 = q2.double(); + } + assert_eq!(q1, q2); + } + + let p2_affine = + AffinePoint { x: p2.x * p2.z.inverse_or_zero(), u: p2.u * p2.t.inverse_or_zero() }; + 
assert_eq!(p1 + p2_affine, p1 + p2); + } + + #[test] + fn test_to_affine() { + let w = QuinticExtension([ + GoldilocksField(12539254003028696409), + GoldilocksField(15524144070600887654), + GoldilocksField(15092036948424041984), + GoldilocksField(11398871370327264211), + GoldilocksField(10958391180505708567), + ]); + let p = Point::decode(w).expect("w should successfully decode"); + + // Create an array of 8 points. + let mut tab1 = [Point::NEUTRAL; 8]; + tab1[0] = p.double(); + for i in 1..tab1.len() { + tab1[i] = tab1[0] + tab1[i - 1]; + } + + // Test conversion to affine coordinates. + for n in 1..(tab1.len() + 1) { + let tab2 = Point::batch_to_affine(&tab1); + for i in 0..n { + assert_eq!(tab1[i].z * tab2[i].x, tab1[i].x); + assert_eq!(tab1[i].t * tab2[i].u, tab1[i].u); + } + } + + // Test lookup. + let win = Point::batch_to_affine(&tab1); + let p1_affine = AffinePoint::lookup(&win, 0); + assert_eq!(p1_affine.x, GFp5::ZERO); + assert_eq!(p1_affine.u, GFp5::ZERO); + for i in 1..9 { + let p2_affine = AffinePoint::lookup(&win, i as i32); + assert_eq!(tab1[i - 1].z * p2_affine.x, tab1[i - 1].x); + assert_eq!(tab1[i - 1].t * p2_affine.u, tab1[i - 1].u); + + let p3_affine = AffinePoint::lookup(&win, -(i as i32)); + assert_eq!(tab1[i - 1].z * p3_affine.x, tab1[i - 1].x); + assert_eq!(tab1[i - 1].t * p3_affine.u, -tab1[i - 1].u); + } + } + + #[test] + fn test_scalar_mul() { + // w1 = encoding of a random point P1 + // ebuf = encoding of a random scalar e + // w2 = encoding of P2 = e*P1 + let w1 = QuinticExtension([ + GoldilocksField(7534507442095725921), + GoldilocksField(16658460051907528927), + GoldilocksField(12417574136563175256), + GoldilocksField(2750788641759288856), + GoldilocksField(620002843272906439), + ]); + let ebuf: [u8; 40] = [ + 0x1B, 0x18, 0x51, 0xC8, 0x1D, 0x22, 0xD4, 0x0D, 0x6D, 0x36, 0xEC, 0xCE, 0x54, 0x27, + 0x41, 0x66, 0x08, 0x14, 0x2F, 0x8F, 0xFF, 0x64, 0xB4, 0x76, 0x28, 0xCD, 0x3F, 0xF8, + 0xAA, 0x25, 0x16, 0xD4, 0xBA, 0xD0, 0xCC, 0x02, 0x1A, 0x44, 0x7C, 0x03, + ]; + let w2 = QuinticExtension([ + GoldilocksField(9486104512504676657), + GoldilocksField(14312981644741144668), + GoldilocksField(5159846406177847664), + GoldilocksField(15978863787033795628), + GoldilocksField(3249948839313771192), + ]); + + let p1 = Point::decode(w1).expect("w1 should successfully decode"); + let p2 = Point::decode(w2).expect("w2 should successfully decode"); + let (e, ce) = Scalar::try_from_noncanonical_bytes(&ebuf); + + assert!(ce == 0xFFFFFFFFFFFFFFFF); + let q1 = p1 * e; + assert!(q1 == p2); + assert!(q1.encode() == w2); + + let q2 = e * p1; + assert!(q2 == p2); + assert!(q2.encode() == w2); + } + + #[test] + fn test_decode() { + let [w0, w1, w2, w3, w4, w5, w6, w7] = test_vectors(); + + let p0_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(6148914689804861440), + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + GFp::ZERO, + ]), + y: GFp5::ZERO, + is_inf: true, + }; + let p0 = WeierstrassPoint::decode(w0).expect("w0 should successfully decode"); + assert_eq!(p0, p0_expected); + + let p1_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(7887569478949190020), + GoldilocksField(11586418388990522938), + GoldilocksField(13676447623055915878), + GoldilocksField(5945168854809921881), + GoldilocksField(16291886980725359814), + ]), + y: QuinticExtension([ + GoldilocksField(7556511254681645335), + GoldilocksField(17611929280367064763), + GoldilocksField(9410908488141053806), + GoldilocksField(11351540010214108766), + GoldilocksField(4846226015431423207), + ]), 
+ is_inf: false, + }; + let p1 = WeierstrassPoint::decode(w1).expect("w1 should successfully decode"); + assert_eq!(p1, p1_expected); + + let p2_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(11231216549003316587), + GoldilocksField(17312878720767554617), + GoldilocksField(5614299211412933260), + GoldilocksField(2256199868722187419), + GoldilocksField(14229722163821261464), + ]), + y: QuinticExtension([ + GoldilocksField(11740132275098847128), + GoldilocksField(18250632754932612452), + GoldilocksField(6988589976052950880), + GoldilocksField(13612651576898186637), + GoldilocksField(16040252831112129154), + ]), + is_inf: false, + }; + let p2 = WeierstrassPoint::decode(w2).expect("w2 should successfully decode"); + assert_eq!(p2, p2_expected); + + let p3_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(567456832026211571), + GoldilocksField(6401615614732569674), + GoldilocksField(7303004494044972219), + GoldilocksField(4332356015409706768), + GoldilocksField(4663512734739523713), + ]), + y: QuinticExtension([ + GoldilocksField(13838792670272995877), + GoldilocksField(11742686110311813089), + GoldilocksField(17972799251722850796), + GoldilocksField(8534723577625674697), + GoldilocksField(3138422718990519265), + ]), + is_inf: false, + }; + let p3 = WeierstrassPoint::decode(w3).expect("w3 should successfully decode"); + assert_eq!(p3, p3_expected); + + let p4_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(2626390539619063455), + GoldilocksField(3069873143820007175), + GoldilocksField(16481805966921623903), + GoldilocksField(2169403494164322467), + GoldilocksField(15849876939764656634), + ]), + y: QuinticExtension([ + GoldilocksField(8052493994140007067), + GoldilocksField(12476750341447220703), + GoldilocksField(7297584762312352412), + GoldilocksField(4456043296886321460), + GoldilocksField(17416054515469523789), + ]), + is_inf: false, + }; + let p4 = WeierstrassPoint::decode(w4).expect("w4 should successfully decode"); + assert_eq!(p4, p4_expected); + + let p5_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(3378618241466923429), + GoldilocksField(1600085176765664645), + GoldilocksField(8450735902517439914), + GoldilocksField(879305481131694650), + GoldilocksField(9249368002914244868), + ]), + y: QuinticExtension([ + GoldilocksField(7063301786803892166), + GoldilocksField(16450112846546843898), + GoldilocksField(13291990378137922105), + GoldilocksField(17122501309646837992), + GoldilocksField(13551174888872382132), + ]), + is_inf: false, + }; + let p5 = WeierstrassPoint::decode(w5).expect("w5 should successfully decode"); + assert_eq!(p5, p5_expected); + + let p6_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(12792842147978866906), + GoldilocksField(10605017725125541653), + GoldilocksField(7515179057747849898), + GoldilocksField(4244613931017322576), + GoldilocksField(5015379385130367832), + ]), + y: QuinticExtension([ + GoldilocksField(11618884250209642346), + GoldilocksField(14788516166813429253), + GoldilocksField(7317520700234795285), + GoldilocksField(12825292405177435802), + GoldilocksField(17658454967394645353), + ]), + is_inf: false, + }; + let p6 = WeierstrassPoint::decode(w6).expect("w6 should successfully decode"); + assert_eq!(p6, p6_expected); + + let p7_expected = WeierstrassPoint { + x: QuinticExtension([ + GoldilocksField(10440794216646581227), + GoldilocksField(13992847258701590930), + GoldilocksField(11213401763785319360), + 
GoldilocksField(12830171931568113117), + GoldilocksField(6220154342199499160), + ]), + y: QuinticExtension([ + GoldilocksField(7971683838841472962), + GoldilocksField(1639066249976938469), + GoldilocksField(15015315060237521031), + GoldilocksField(10847769264696425470), + GoldilocksField(9177491810370773777), + ]), + is_inf: false, + }; + let p7 = WeierstrassPoint::decode(w7).expect("w7 should successfully decode"); + assert_eq!(p7, p7_expected); + + let w_gen = GFp5::from_canonical_u16(4); + let g = WeierstrassPoint::decode(w_gen).expect("w_gen should successfully decode"); + assert_eq!(g, WeierstrassPoint::GENERATOR); + } + + #[test] + fn test_decode_random() { + let mut rng = thread_rng(); + for _ in 0..30 { + let point = Point::sample(&mut rng); + let encoded = point.encode(); + let decoded = Point::decode(encoded).expect("decoding should succeed"); + assert_eq!(point, decoded); + + let encoded = point.to_weierstrass().encode(); + let decoded = WeierstrassPoint::decode(encoded).expect("decoding should succeed"); + assert_eq!(point.to_weierstrass(), decoded); + + let decoded = Point::decode(encoded).expect("decoding should succeed"); + assert_eq!(point, decoded); + } + } + + #[test] + fn test_mulgen() { + let mut rng = thread_rng(); + for _ in 0..20 { + let mut ebuf = [0u8; 48]; + rng.fill(&mut ebuf); + + let e = Scalar::from_noncanonical_bytes(&ebuf); + let p1 = Point::GENERATOR * e; + let p2 = Point::mulgen(e); + + assert_eq!(p1, p2); + } + } + + #[test] + fn test_verify_muladd() { + let mut rng = thread_rng(); + for _ in 0..20 { + let mut ebuf = [0u8; 48]; + let mut sbuf = [0u8; 48]; + let mut kbuf = [0u8; 48]; + rng.fill(&mut ebuf); + rng.fill(&mut sbuf); + rng.fill(&mut kbuf); + + let e = Scalar::from_noncanonical_bytes(&ebuf); + let s = Scalar::from_noncanonical_bytes(&sbuf); + let k = Scalar::from_noncanonical_bytes(&kbuf); + + let q = Point::mulgen(e); + let r = Point::mulgen(s) + k * q; + assert!(q.verify_muladd_vartime(s, k, r)); + + let r2 = r + Point::GENERATOR; + assert!(!q.verify_muladd_vartime(s, k, r2)); + } + } + + #[test] + fn test_point_convert_str() { + let w1 = QuinticExtension([ + GoldilocksField(7534507442095725921), + GoldilocksField(16658460051907528927), + GoldilocksField(12417574136563175256), + GoldilocksField(2750788641759288856), + GoldilocksField(620002843272906439), + ]); + + let p1 = Point::decode(w1).expect("w1 should successfully decode"); + let hex_str = p1.to_hex_string(); + let recoverred = Point::from_hex_string(&hex_str); + assert_eq!(p1, recoverred); + } + + #[test] + fn test_to_from_le_bytes() { + let mut rng = thread_rng(); + for _ in 0..20 { + let point = Point::sample(&mut rng); + let bytes = point.to_le_bytes(); + let decoded = Point::from_le_bytes(bytes).expect("decoding should succeed"); + assert_eq!(point, decoded); + } + } +} diff --git a/ecgfp5/src/curve/mod.rs b/ecgfp5/src/curve/mod.rs new file mode 100644 index 0000000000..5740ee0598 --- /dev/null +++ b/ecgfp5/src/curve/mod.rs @@ -0,0 +1,12 @@ +use plonky2_field::{extension::quintic::QuinticExtension, goldilocks_field::GoldilocksField}; + +pub(crate) type GFp5 = QuinticExtension; +pub(crate) type GFp = GoldilocksField; + +pub mod base_field; +pub mod curve; +pub(crate) mod mul_table; +pub mod scalar_field; + +#[cfg(test)] +pub mod test_utils; diff --git a/ecgfp5/src/curve/mul_table.rs b/ecgfp5/src/curve/mul_table.rs new file mode 100644 index 0000000000..dab2443f19 --- /dev/null +++ b/ecgfp5/src/curve/mul_table.rs @@ -0,0 +1,2132 @@ +use super::curve::AffinePoint; +use 
plonky2_field::{extension::quintic::QuinticExtension, goldilocks_field::GoldilocksField}; + +pub(crate) const MUL_TABLE_G0: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12883135586176881569), + GoldilocksField(4356519642755055268), + GoldilocksField(5248930565894896907), + GoldilocksField(2165973894480315022), + GoldilocksField(2448410071095648785), + ]), + u: QuinticExtension([ + GoldilocksField(13835058052060938241), + GoldilocksField(0), + GoldilocksField(0), + GoldilocksField(0), + GoldilocksField(0), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16517537419581740386), + GoldilocksField(6962630169123120981), + GoldilocksField(12147752690379666704), + GoldilocksField(16637325971742264607), + GoldilocksField(2335078582315237010), + ]), + u: QuinticExtension([ + GoldilocksField(8457587110646932172), + GoldilocksField(138591869800252458), + GoldilocksField(3187444967472352324), + GoldilocksField(18179149801168653736), + GoldilocksField(9453003655195557048), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4546139357324501584), + GoldilocksField(1393728687664685160), + GoldilocksField(15208040286522119521), + GoldilocksField(7903224051455420834), + GoldilocksField(12463930627278381774), + ]), + u: QuinticExtension([ + GoldilocksField(16373828487211693378), + GoldilocksField(5899455736915524900), + GoldilocksField(17616512450102495476), + GoldilocksField(17643201028570366669), + GoldilocksField(2833280130550676525), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4341836049185169731), + GoldilocksField(9111482874850194930), + GoldilocksField(7798994609726992878), + GoldilocksField(12619124383509403661), + GoldilocksField(13047834166950680886), + ]), + u: QuinticExtension([ + GoldilocksField(3584786391427904733), + GoldilocksField(1717626083626375072), + GoldilocksField(16549008311909030594), + GoldilocksField(17550175197111849143), + GoldilocksField(18374971670674568416), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(18121072711119258927), + GoldilocksField(3394315639035318724), + GoldilocksField(2648370499809919556), + GoldilocksField(13348924736921714137), + GoldilocksField(3428166646246873447), + ]), + u: QuinticExtension([ + GoldilocksField(9264305576790077869), + GoldilocksField(7426254234280836405), + GoldilocksField(5107777768036114824), + GoldilocksField(9390769538758625122), + GoldilocksField(9788182195111344062), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11080635543643017332), + GoldilocksField(3122290570793204485), + GoldilocksField(16632474826839786439), + GoldilocksField(14883711538614796285), + GoldilocksField(10396852362099782295), + ]), + u: QuinticExtension([ + GoldilocksField(14253916706639980511), + GoldilocksField(15728038457561632290), + GoldilocksField(3947138785484546318), + GoldilocksField(4740958322851071718), + GoldilocksField(17384736114265519442), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4763058716218401568), + GoldilocksField(17879823368956058516), + GoldilocksField(13578954599286698938), + GoldilocksField(8634670560943921567), + GoldilocksField(13706660844700767685), + ]), + u: QuinticExtension([ + GoldilocksField(3354778288360932917), + GoldilocksField(13842278303693121409), + GoldilocksField(4717821645259836467), + GoldilocksField(7978743897613094276), + GoldilocksField(10118963888992569394), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4026958896735257282), + 
GoldilocksField(13595990041314210204), + GoldilocksField(11499471878438064392), + GoldilocksField(10019455879458851233), + GoldilocksField(11986847968355927330), + ]), + u: QuinticExtension([ + GoldilocksField(14532821659997761913), + GoldilocksField(9582789969382797985), + GoldilocksField(3082219099923033594), + GoldilocksField(2859656980617778370), + GoldilocksField(3746047816071136016), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(15935900828168308224), + GoldilocksField(8668680449802005535), + GoldilocksField(491315506768012688), + GoldilocksField(6584881037682113026), + GoldilocksField(12386385009372860460), + ]), + u: QuinticExtension([ + GoldilocksField(13217832923050551864), + GoldilocksField(51671271962049328), + GoldilocksField(15400792709153778477), + GoldilocksField(6752203529649104660), + GoldilocksField(2855313280735340066), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8473506523195244465), + GoldilocksField(2446964921175324878), + GoldilocksField(17962771942831363202), + GoldilocksField(6949608686158330138), + GoldilocksField(9315492999547366751), + ]), + u: QuinticExtension([ + GoldilocksField(5171814696081600409), + GoldilocksField(3025466154945175207), + GoldilocksField(453302446979841822), + GoldilocksField(14135305892339872079), + GoldilocksField(2556388051049291052), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(3960231187580500028), + GoldilocksField(3695840168764199059), + GoldilocksField(2914577777792670911), + GoldilocksField(9249939676680902688), + GoldilocksField(17553522813502241416), + ]), + u: QuinticExtension([ + GoldilocksField(3015152305907361949), + GoldilocksField(10730034543155667220), + GoldilocksField(3314242046485170944), + GoldilocksField(1984395553885795852), + GoldilocksField(13781645774758249860), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11575997426281090678), + GoldilocksField(1534495174840625570), + GoldilocksField(7539338128385981583), + GoldilocksField(10393042019577161985), + GoldilocksField(10667466219175771157), + ]), + u: QuinticExtension([ + GoldilocksField(16681365912970185037), + GoldilocksField(11287896019745355117), + GoldilocksField(11069899752345274504), + GoldilocksField(15487604769605237513), + GoldilocksField(13467978440572613228), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11192179397773394280), + GoldilocksField(3555953455665397909), + GoldilocksField(5346523552109387121), + GoldilocksField(4514445299325204396), + GoldilocksField(3932728981135688453), + ]), + u: QuinticExtension([ + GoldilocksField(5421638117266109845), + GoldilocksField(204299445119713184), + GoldilocksField(6067390115784997081), + GoldilocksField(16191134954342419157), + GoldilocksField(4139938600224417293), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(13189785832536261642), + GoldilocksField(8777097377506996162), + GoldilocksField(17497140949916325738), + GoldilocksField(15140279769427597032), + GoldilocksField(15517274717131999881), + ]), + u: QuinticExtension([ + GoldilocksField(1040464435413162742), + GoldilocksField(9262701069034606854), + GoldilocksField(2990438819650713743), + GoldilocksField(18129195737333990255), + GoldilocksField(12490074042478236606), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17716508479149156535), + GoldilocksField(14351380558651795729), + GoldilocksField(3644546258883003807), + GoldilocksField(5171318241596472386), + GoldilocksField(294806796132518330), + 
]), + u: QuinticExtension([ + GoldilocksField(7535225611936271281), + GoldilocksField(14682077054502188499), + GoldilocksField(784215514926156349), + GoldilocksField(5280586574139275596), + GoldilocksField(14407528916988559545), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8681294642569802563), + GoldilocksField(7751765660802747503), + GoldilocksField(16382129702876313971), + GoldilocksField(7447155060842833278), + GoldilocksField(6859908403876474879), + ]), + u: QuinticExtension([ + GoldilocksField(9674486254207846385), + GoldilocksField(5248970165164951259), + GoldilocksField(3611784478790504991), + GoldilocksField(18437168019170350173), + GoldilocksField(3537959913875671086), + ]), + }, +]; +pub(crate) const MUL_TABLE_G40: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6996996617034310847), + GoldilocksField(1312534891996392328), + GoldilocksField(1967056454231743182), + GoldilocksField(12432745115107639465), + GoldilocksField(8188918658769983203), + ]), + u: QuinticExtension([ + GoldilocksField(9779151955752388390), + GoldilocksField(12827693252247248589), + GoldilocksField(8299002358494291091), + GoldilocksField(10057624387258292793), + GoldilocksField(9561932552523598817), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12727761422252171591), + GoldilocksField(11715233354058649362), + GoldilocksField(11258110171296383015), + GoldilocksField(4946612044061620143), + GoldilocksField(10674140266605092092), + ]), + u: QuinticExtension([ + GoldilocksField(13968556698015688219), + GoldilocksField(9764817221409883159), + GoldilocksField(6009815048702102249), + GoldilocksField(928542484379469501), + GoldilocksField(17548136021451934003), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(1449347403861973234), + GoldilocksField(1268700206465777189), + GoldilocksField(68931803832001930), + GoldilocksField(508124187869777281), + GoldilocksField(14966299269768645002), + ]), + u: QuinticExtension([ + GoldilocksField(12519156548432608657), + GoldilocksField(1830718924858545317), + GoldilocksField(8290101973558828816), + GoldilocksField(6963396969528752135), + GoldilocksField(5027294278125306748), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(7543060992125635458), + GoldilocksField(13154269169488238929), + GoldilocksField(13038629689209617296), + GoldilocksField(17607665244721587028), + GoldilocksField(4076634695833139117), + ]), + u: QuinticExtension([ + GoldilocksField(16474278336963843968), + GoldilocksField(7342735040871703005), + GoldilocksField(11822823161099820577), + GoldilocksField(15838689010341349421), + GoldilocksField(8387592947884092077), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6956288471673670775), + GoldilocksField(10363521468804730232), + GoldilocksField(1618310362752793530), + GoldilocksField(16886810269476841179), + GoldilocksField(4982980062158920723), + ]), + u: QuinticExtension([ + GoldilocksField(13688045661223437644), + GoldilocksField(17947601766473933193), + GoldilocksField(7138906029562123225), + GoldilocksField(14564553876341839060), + GoldilocksField(4126496432434298977), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(800685292854622487), + GoldilocksField(11040079590365906652), + GoldilocksField(1466305609865524328), + GoldilocksField(8372552820474238249), + GoldilocksField(10874913568038030998), + ]), + u: QuinticExtension([ + GoldilocksField(4703724548613471267), + 
GoldilocksField(16058989380922585526), + GoldilocksField(8365972383552432650), + GoldilocksField(12321780682158893877), + GoldilocksField(2418487585371688136), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12518522439291713316), + GoldilocksField(18265859802652833974), + GoldilocksField(4355197864757715454), + GoldilocksField(16333106890933317228), + GoldilocksField(7860825917869078801), + ]), + u: QuinticExtension([ + GoldilocksField(12169474924601364130), + GoldilocksField(1427729574788767322), + GoldilocksField(3451823787886833090), + GoldilocksField(4595725973834664846), + GoldilocksField(5636506224235047729), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(7016248550036618856), + GoldilocksField(14664150699534918598), + GoldilocksField(7289196844394571239), + GoldilocksField(3733481542224777638), + GoldilocksField(12940819275544993154), + ]), + u: QuinticExtension([ + GoldilocksField(5962170105887193190), + GoldilocksField(7757792046810148121), + GoldilocksField(17754145760690637154), + GoldilocksField(5608151523576337415), + GoldilocksField(10158975094989974837), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9657667902075638078), + GoldilocksField(1738398137082324930), + GoldilocksField(3309896085425426006), + GoldilocksField(5244596195331513559), + GoldilocksField(11098614916240915598), + ]), + u: QuinticExtension([ + GoldilocksField(10176686769986870501), + GoldilocksField(17149616066773579692), + GoldilocksField(16557806655360885458), + GoldilocksField(6409371822017281510), + GoldilocksField(447032970886916415), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(2000298634105946483), + GoldilocksField(7990437998469847294), + GoldilocksField(13891384442822604159), + GoldilocksField(3400617664053350732), + GoldilocksField(17650120710895099722), + ]), + u: QuinticExtension([ + GoldilocksField(9011065287270146338), + GoldilocksField(9712006535696787670), + GoldilocksField(5197636265344816024), + GoldilocksField(14644619822912127741), + GoldilocksField(5091497898426581809), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4353689210628214181), + GoldilocksField(11629282537514442736), + GoldilocksField(519301038092536110), + GoldilocksField(17451856528277649540), + GoldilocksField(8053963837814854762), + ]), + u: QuinticExtension([ + GoldilocksField(16247175863676166340), + GoldilocksField(13321024650071188595), + GoldilocksField(13226465566647040787), + GoldilocksField(15830701216342305199), + GoldilocksField(10171768200911815007), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8618084654605828418), + GoldilocksField(2932134432657893516), + GoldilocksField(14040921219429416616), + GoldilocksField(8539270659386774297), + GoldilocksField(8223174716536738537), + ]), + u: QuinticExtension([ + GoldilocksField(2118173438466787625), + GoldilocksField(17017456632539625481), + GoldilocksField(3822614388660837302), + GoldilocksField(18012676134277779138), + GoldilocksField(14555233257002087745), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(14104364668871773315), + GoldilocksField(4671583541374529339), + GoldilocksField(14595315310536253921), + GoldilocksField(12293043219805252275), + GoldilocksField(11083273927620890457), + ]), + u: QuinticExtension([ + GoldilocksField(13013197605833180311), + GoldilocksField(6369553806055216484), + GoldilocksField(13715364943719691230), + GoldilocksField(832870131890809214), + 
GoldilocksField(2834204446065110889), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17610707880266706457), + GoldilocksField(8946198449628536102), + GoldilocksField(14056449117236625467), + GoldilocksField(6751468363564694789), + GoldilocksField(10581122285882655867), + ]), + u: QuinticExtension([ + GoldilocksField(16822879694511882841), + GoldilocksField(7030889609682609080), + GoldilocksField(1819733726510865699), + GoldilocksField(1477354361991598818), + GoldilocksField(3060932650955723086), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16383078186728412911), + GoldilocksField(17336793750234608284), + GoldilocksField(10282704501742138249), + GoldilocksField(8902952211247569575), + GoldilocksField(10036728575538225007), + ]), + u: QuinticExtension([ + GoldilocksField(980771758638014650), + GoldilocksField(8822864673362619613), + GoldilocksField(1247272673889574430), + GoldilocksField(8049338215992656959), + GoldilocksField(5754772454101411592), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4793164719180728081), + GoldilocksField(10337661541467018847), + GoldilocksField(4370608981419008671), + GoldilocksField(8309057178611279515), + GoldilocksField(11967697131554357119), + ]), + u: QuinticExtension([ + GoldilocksField(17586180332786867000), + GoldilocksField(10992062529780955862), + GoldilocksField(4283639578773926288), + GoldilocksField(10598406479331979533), + GoldilocksField(13292632801372322468), + ]), + }, +]; +pub(crate) const MUL_TABLE_G80: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(13832685079504880268), + GoldilocksField(18013036221761440296), + GoldilocksField(1301626881083565265), + GoldilocksField(9139126253053898429), + GoldilocksField(4505395467569954655), + ]), + u: QuinticExtension([ + GoldilocksField(7359813255592029850), + GoldilocksField(16688014242518042008), + GoldilocksField(4399996806448279465), + GoldilocksField(5271684552135959425), + GoldilocksField(11652444551874101645), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(10957597387983347508), + GoldilocksField(15279113224094632648), + GoldilocksField(16636004563247846338), + GoldilocksField(139361871129849794), + GoldilocksField(14913244377905888101), + ]), + u: QuinticExtension([ + GoldilocksField(7004241227096627206), + GoldilocksField(639096603853214644), + GoldilocksField(17343971022152731708), + GoldilocksField(11127082727624914758), + GoldilocksField(6961420809959752544), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(1678194015101167575), + GoldilocksField(10443033913340861968), + GoldilocksField(15723961754453665965), + GoldilocksField(466551946746500778), + GoldilocksField(1384638131140679955), + ]), + u: QuinticExtension([ + GoldilocksField(7911659739613756657), + GoldilocksField(9008449922226900897), + GoldilocksField(8828649835406020350), + GoldilocksField(12804093940915848836), + GoldilocksField(5168873490743917498), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9668801441369446334), + GoldilocksField(1618542760290755427), + GoldilocksField(15806911258918325259), + GoldilocksField(14508945524557601221), + GoldilocksField(8400708218360666510), + ]), + u: QuinticExtension([ + GoldilocksField(2070702333293922760), + GoldilocksField(6249392735673775978), + GoldilocksField(5221268220067076678), + GoldilocksField(12830382095618421300), + GoldilocksField(6798253292813277552), + ]), + }, + AffinePoint { + x: QuinticExtension([ + 
GoldilocksField(613541603487791685), + GoldilocksField(13807376311954113152), + GoldilocksField(4937154484322350324), + GoldilocksField(3044864073363788260), + GoldilocksField(10659806245468237672), + ]), + u: QuinticExtension([ + GoldilocksField(11268721606331277338), + GoldilocksField(14114972563238185761), + GoldilocksField(15134656524184558801), + GoldilocksField(8109827563124888851), + GoldilocksField(3238236749755375190), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(3653303296020985061), + GoldilocksField(16968816356701165477), + GoldilocksField(3537989784194419792), + GoldilocksField(6048563117397703739), + GoldilocksField(13275594789417281589), + ]), + u: QuinticExtension([ + GoldilocksField(15320572452406052803), + GoldilocksField(423975947193335924), + GoldilocksField(9786061404780445812), + GoldilocksField(113935901661183202), + GoldilocksField(17462508908451992614), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(5575647366699441601), + GoldilocksField(2189227564735866743), + GoldilocksField(1686208091749425593), + GoldilocksField(6736750915939348632), + GoldilocksField(17433930427527644213), + ]), + u: QuinticExtension([ + GoldilocksField(7057911563532867792), + GoldilocksField(16566118262655927325), + GoldilocksField(12888897205414551370), + GoldilocksField(14415855073450397097), + GoldilocksField(1147090766535755807), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(15658118616953616040), + GoldilocksField(2263539327323250613), + GoldilocksField(2715487874579250798), + GoldilocksField(11933376952724039169), + GoldilocksField(17769318666901826029), + ]), + u: QuinticExtension([ + GoldilocksField(16745623139313228390), + GoldilocksField(9536464142142244411), + GoldilocksField(12504946243788089281), + GoldilocksField(704708129354743638), + GoldilocksField(14573477780244357666), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(13701409545959547673), + GoldilocksField(11537398060095127371), + GoldilocksField(18304316093185449069), + GoldilocksField(16990637176495122248), + GoldilocksField(8300454239136955447), + ]), + u: QuinticExtension([ + GoldilocksField(12946536999123301864), + GoldilocksField(16028271018248917226), + GoldilocksField(14442669626987508876), + GoldilocksField(8204605677104061293), + GoldilocksField(13012677989830312429), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8977822175893189913), + GoldilocksField(8385291758088962932), + GoldilocksField(6459781748990922334), + GoldilocksField(10500670301259390474), + GoldilocksField(8148745850566531944), + ]), + u: QuinticExtension([ + GoldilocksField(648416469448933683), + GoldilocksField(2018140447090876597), + GoldilocksField(11059355864713025945), + GoldilocksField(17171402628974174968), + GoldilocksField(720667133464111689), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(533755834279203303), + GoldilocksField(9133223350344638107), + GoldilocksField(6496913004501565984), + GoldilocksField(5070553496917221248), + GoldilocksField(10026395429516732342), + ]), + u: QuinticExtension([ + GoldilocksField(17311718290481148297), + GoldilocksField(12616184711972987746), + GoldilocksField(16195499951758316636), + GoldilocksField(8118955923598298529), + GoldilocksField(16774524951584936403), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8977932331742105562), + GoldilocksField(9135241432935976918), + GoldilocksField(8762554005485625681), + GoldilocksField(14767442741287060847), 
+ GoldilocksField(9223537459805575058), + ]), + u: QuinticExtension([ + GoldilocksField(15269989054854026299), + GoldilocksField(11931086694777575213), + GoldilocksField(1979657370606607924), + GoldilocksField(10082554692183350114), + GoldilocksField(4573690475951190900), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12857842861752747447), + GoldilocksField(11647180289644065286), + GoldilocksField(17408779236478002670), + GoldilocksField(5917000661832739376), + GoldilocksField(1047056879360966448), + ]), + u: QuinticExtension([ + GoldilocksField(60118689797675542), + GoldilocksField(1664328840457595492), + GoldilocksField(7075936368160047305), + GoldilocksField(13974115771952400562), + GoldilocksField(11318108364890349009), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17452797179483233405), + GoldilocksField(6882955852043132316), + GoldilocksField(9304840691925828603), + GoldilocksField(981483665863638676), + GoldilocksField(11024236439678964632), + ]), + u: QuinticExtension([ + GoldilocksField(2608844450889021414), + GoldilocksField(2862891036050959369), + GoldilocksField(9059816914007502053), + GoldilocksField(16849128770451662626), + GoldilocksField(54944805734402483), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(7304267395998600281), + GoldilocksField(12651945702824162423), + GoldilocksField(12034846251704181244), + GoldilocksField(14535937891251268540), + GoldilocksField(16446125823956689442), + ]), + u: QuinticExtension([ + GoldilocksField(14013745143822621484), + GoldilocksField(13346293440957348839), + GoldilocksField(14559163781616146382), + GoldilocksField(10079303505894311335), + GoldilocksField(13316971442260780794), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(2073268421803561746), + GoldilocksField(13903239987062959402), + GoldilocksField(6595765789009484358), + GoldilocksField(12734389031510939331), + GoldilocksField(14507055985845886345), + ]), + u: QuinticExtension([ + GoldilocksField(6178525556615612), + GoldilocksField(5187104181066643307), + GoldilocksField(2097004975629951488), + GoldilocksField(3624702972881058018), + GoldilocksField(15835733836057682299), + ]), + }, +]; +pub(crate) const MUL_TABLE_G120: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9358418073545563325), + GoldilocksField(6201803925005767184), + GoldilocksField(17525836657555505989), + GoldilocksField(18172103331346227979), + GoldilocksField(11525670089424228174), + ]), + u: QuinticExtension([ + GoldilocksField(15389027580004038174), + GoldilocksField(17425413276694524614), + GoldilocksField(15639145503384753087), + GoldilocksField(15041017306226520945), + GoldilocksField(7937401073912193639), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(424871884768762681), + GoldilocksField(13522556051462729987), + GoldilocksField(12578037128032095483), + GoldilocksField(15478027026291985081), + GoldilocksField(3107357372380600388), + ]), + u: QuinticExtension([ + GoldilocksField(139609698330600720), + GoldilocksField(13047471464877067976), + GoldilocksField(14569000597615364817), + GoldilocksField(2241769726453036433), + GoldilocksField(15809930333584099827), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(14490559385422698658), + GoldilocksField(9192132350820542857), + GoldilocksField(5174647998374408373), + GoldilocksField(14517418341859680382), + GoldilocksField(17127851909541764338), + ]), + u: QuinticExtension([ + 
GoldilocksField(10617869578552630251), + GoldilocksField(15452062022333822112), + GoldilocksField(74217513813449143), + GoldilocksField(7065334431037916517), + GoldilocksField(1908363005628198785), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(14158363767375738516), + GoldilocksField(10881488396819614845), + GoldilocksField(9845083246403658682), + GoldilocksField(308084846693439896), + GoldilocksField(2258456665285229766), + ]), + u: QuinticExtension([ + GoldilocksField(10189353602169967163), + GoldilocksField(3307134994579671177), + GoldilocksField(15193472587506759411), + GoldilocksField(1522949334698619656), + GoldilocksField(10335076055833410122), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6988575191781662507), + GoldilocksField(6763011815163702392), + GoldilocksField(12700583067108401780), + GoldilocksField(10889091046959437472), + GoldilocksField(14563326032896307580), + ]), + u: QuinticExtension([ + GoldilocksField(12122806272622858917), + GoldilocksField(17957572904440664730), + GoldilocksField(1371105162549165938), + GoldilocksField(7050159476133204977), + GoldilocksField(14174648605675469597), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11134953890003368271), + GoldilocksField(1950173651962543315), + GoldilocksField(15717245132850143332), + GoldilocksField(3404453732698149788), + GoldilocksField(5301672891646287185), + ]), + u: QuinticExtension([ + GoldilocksField(3625450390591129442), + GoldilocksField(7246221686985732698), + GoldilocksField(883169685721066424), + GoldilocksField(4890159692945065594), + GoldilocksField(5846189492174531971), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17779324141575511119), + GoldilocksField(2222820233157145199), + GoldilocksField(14311299573357024128), + GoldilocksField(6091166172492559467), + GoldilocksField(13251122054512244755), + ]), + u: QuinticExtension([ + GoldilocksField(13595785608342218333), + GoldilocksField(5346420442473779380), + GoldilocksField(15973815498598602014), + GoldilocksField(17570023165337986853), + GoldilocksField(4489084688781803549), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(18066103166710948306), + GoldilocksField(17952420946495149357), + GoldilocksField(6895726862811180419), + GoldilocksField(5250742026743142449), + GoldilocksField(5546233908977317256), + ]), + u: QuinticExtension([ + GoldilocksField(13627730136315133390), + GoldilocksField(16318021942381891511), + GoldilocksField(17522263726824223313), + GoldilocksField(2960524358953784315), + GoldilocksField(9229420628457238614), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(1295063301573260180), + GoldilocksField(2809464405706641890), + GoldilocksField(16876063007570590190), + GoldilocksField(414980406456848047), + GoldilocksField(8882993381636093379), + ]), + u: QuinticExtension([ + GoldilocksField(14084704505090840803), + GoldilocksField(1455438701125484684), + GoldilocksField(7140138141300391159), + GoldilocksField(3304135812365795152), + GoldilocksField(2617025679312300128), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6275460045614886806), + GoldilocksField(3390801146218874506), + GoldilocksField(17247998212939720068), + GoldilocksField(14133145208463656732), + GoldilocksField(3920522032578446900), + ]), + u: QuinticExtension([ + GoldilocksField(7628461038336051188), + GoldilocksField(5939897916270777659), + GoldilocksField(9105159200762125376), + GoldilocksField(13546478897675664577), + 
GoldilocksField(10279072558522952380), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11630988947772602329), + GoldilocksField(12620300429100070711), + GoldilocksField(79628975116792272), + GoldilocksField(17920472109136769182), + GoldilocksField(5826732348459131885), + ]), + u: QuinticExtension([ + GoldilocksField(2736111763898189506), + GoldilocksField(14407691554344511345), + GoldilocksField(10405697919259369402), + GoldilocksField(2951539272691560626), + GoldilocksField(17028604616981679777), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9988698078321716091), + GoldilocksField(429119229031363106), + GoldilocksField(7711926677839955310), + GoldilocksField(14843425123144395632), + GoldilocksField(2253491748118774140), + ]), + u: QuinticExtension([ + GoldilocksField(17190043005790419516), + GoldilocksField(13808981798094567902), + GoldilocksField(4645442529701115361), + GoldilocksField(10360499666917437943), + GoldilocksField(13003321814463836887), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(5428331454841389691), + GoldilocksField(5911352608778299689), + GoldilocksField(12033745745356201095), + GoldilocksField(14100994707656604830), + GoldilocksField(2886042088926452362), + ]), + u: QuinticExtension([ + GoldilocksField(12925133128294153456), + GoldilocksField(6458535650167456730), + GoldilocksField(8582452901418814402), + GoldilocksField(9403948375821725222), + GoldilocksField(4166244923628463342), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(14049231651863941702), + GoldilocksField(9994040187462027964), + GoldilocksField(16602305579940231446), + GoldilocksField(8805602289491330699), + GoldilocksField(544940053745291275), + ]), + u: QuinticExtension([ + GoldilocksField(18184165264127619754), + GoldilocksField(11557606822284913524), + GoldilocksField(7784129138807937081), + GoldilocksField(11583517824597488539), + GoldilocksField(7002309200501552489), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(814125571699125593), + GoldilocksField(113298508670324430), + GoldilocksField(3553512439231149575), + GoldilocksField(5722734149611317431), + GoldilocksField(13535892466294020417), + ]), + u: QuinticExtension([ + GoldilocksField(10718151468633124775), + GoldilocksField(1411760656056230045), + GoldilocksField(2150017719245220876), + GoldilocksField(14735221082549759933), + GoldilocksField(13642901740019011009), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16483184730500681046), + GoldilocksField(9673757784055259057), + GoldilocksField(7760528659260895061), + GoldilocksField(14112860811008950766), + GoldilocksField(14165735631683045125), + ]), + u: QuinticExtension([ + GoldilocksField(16200754974233622593), + GoldilocksField(15775772353572942080), + GoldilocksField(8728522175126988968), + GoldilocksField(14337787208807512369), + GoldilocksField(6870309312996910338), + ]), + }, +]; +pub(crate) const MUL_TABLE_G160: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4048997798593065056), + GoldilocksField(8401406543098379712), + GoldilocksField(8471972887547353150), + GoldilocksField(11271856534362959532), + GoldilocksField(11485893719004138771), + ]), + u: QuinticExtension([ + GoldilocksField(9981895593163975663), + GoldilocksField(16506992680199754648), + GoldilocksField(9795990766132909080), + GoldilocksField(14537323266760073360), + GoldilocksField(16786980505293186490), + ]), + }, + AffinePoint { + x: QuinticExtension([ + 
GoldilocksField(16515152542557678971), + GoldilocksField(2820879738576535933), + GoldilocksField(14546871004256087775), + GoldilocksField(8067774721434663075), + GoldilocksField(5547758516300176370), + ]), + u: QuinticExtension([ + GoldilocksField(13156895577790221631), + GoldilocksField(14079823781876329633), + GoldilocksField(3663423310046916033), + GoldilocksField(8256729270602146828), + GoldilocksField(8025936200066564880), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(7199041597631769070), + GoldilocksField(6507380560962078664), + GoldilocksField(8741238648067440929), + GoldilocksField(5032023372661133788), + GoldilocksField(1471499738040488525), + ]), + u: QuinticExtension([ + GoldilocksField(16127942173059622373), + GoldilocksField(17662578881118466367), + GoldilocksField(5426223217353814653), + GoldilocksField(12687076501536075723), + GoldilocksField(11700332978843695966), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(15540762768637989229), + GoldilocksField(5977836013802283595), + GoldilocksField(17611433126093706192), + GoldilocksField(1869784237181444322), + GoldilocksField(262436190082189342), + ]), + u: QuinticExtension([ + GoldilocksField(16646868690306195484), + GoldilocksField(2492778147148350975), + GoldilocksField(12994887025011189709), + GoldilocksField(18073347299788553346), + GoldilocksField(16182392324261935778), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(14451630909414751763), + GoldilocksField(2682316750096868275), + GoldilocksField(15784810705353479256), + GoldilocksField(9913396490753039039), + GoldilocksField(17084522528101355432), + ]), + u: QuinticExtension([ + GoldilocksField(13015512373883463322), + GoldilocksField(18140315257280584894), + GoldilocksField(5133502996496697434), + GoldilocksField(581463011694460141), + GoldilocksField(10720904114857970130), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(1736453911989778095), + GoldilocksField(2522761214792928606), + GoldilocksField(8490724482968195082), + GoldilocksField(3061517266849590914), + GoldilocksField(7560708607478466898), + ]), + u: QuinticExtension([ + GoldilocksField(2559867271025702686), + GoldilocksField(8279186716530148418), + GoldilocksField(9394033500068495079), + GoldilocksField(15391096564340037389), + GoldilocksField(15441682874751040991), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(10406668092884681305), + GoldilocksField(12237016771705070337), + GoldilocksField(6310848257747044271), + GoldilocksField(4113320295997237722), + GoldilocksField(16814466981743832206), + ]), + u: QuinticExtension([ + GoldilocksField(3167328430596803860), + GoldilocksField(3373273028644416665), + GoldilocksField(13236218152769964416), + GoldilocksField(15816058495228292851), + GoldilocksField(8001858317475143616), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(18177821317840763398), + GoldilocksField(7208454950216370358), + GoldilocksField(14780008596150434388), + GoldilocksField(1996209710201147400), + GoldilocksField(15053807226826426393), + ]), + u: QuinticExtension([ + GoldilocksField(15715262971932204033), + GoldilocksField(8714266598318325282), + GoldilocksField(16219555901832677748), + GoldilocksField(245656264630859564), + GoldilocksField(4633621313248689546), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(1841517068532537972), + GoldilocksField(18159427598377627852), + GoldilocksField(7101751901448687743), + 
GoldilocksField(17419951806778701769), + GoldilocksField(15872135176100603181), + ]), + u: QuinticExtension([ + GoldilocksField(14938501378296161155), + GoldilocksField(1475670735048314023), + GoldilocksField(16050270748361767813), + GoldilocksField(14927165212644255889), + GoldilocksField(11393545566941110440), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11016016673823483108), + GoldilocksField(10346862381783788104), + GoldilocksField(15413371668364281620), + GoldilocksField(3789574685442821016), + GoldilocksField(10327416280296530490), + ]), + u: QuinticExtension([ + GoldilocksField(1471176826026130963), + GoldilocksField(4450232675785892534), + GoldilocksField(1999057422912905727), + GoldilocksField(1862118471196890026), + GoldilocksField(9836667920542412877), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6731903398643108337), + GoldilocksField(9730751811492293683), + GoldilocksField(14448312389075081409), + GoldilocksField(12232946634578520226), + GoldilocksField(5149728844990350383), + ]), + u: QuinticExtension([ + GoldilocksField(13304407859937134355), + GoldilocksField(9591204855047500826), + GoldilocksField(7113854334829183285), + GoldilocksField(11795800474402144172), + GoldilocksField(15515700514512556333), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16206498404205552972), + GoldilocksField(7467982993043588737), + GoldilocksField(954111054908556531), + GoldilocksField(15259102337638072429), + GoldilocksField(12916169332967261393), + ]), + u: QuinticExtension([ + GoldilocksField(10986259094443142549), + GoldilocksField(918816446526617182), + GoldilocksField(10678622673672003543), + GoldilocksField(9174304313393317665), + GoldilocksField(7047157651466091392), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(5524339637429426536), + GoldilocksField(13012396000187524883), + GoldilocksField(8701257881797351665), + GoldilocksField(7601128411527015893), + GoldilocksField(16817462731082877836), + ]), + u: QuinticExtension([ + GoldilocksField(17315109416612436252), + GoldilocksField(8903947754371488039), + GoldilocksField(11080795620793054950), + GoldilocksField(12186542410997831530), + GoldilocksField(10711958746278079839), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6264744119896948359), + GoldilocksField(10601340541921400101), + GoldilocksField(14657733949024428965), + GoldilocksField(4449426502181859631), + GoldilocksField(15315608631820517742), + ]), + u: QuinticExtension([ + GoldilocksField(2840761601268004671), + GoldilocksField(5838696228978373234), + GoldilocksField(8592255273635329784), + GoldilocksField(11032000397652854764), + GoldilocksField(10686619933707435695), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(18403739294830496585), + GoldilocksField(5395321127948182079), + GoldilocksField(4362930334215698085), + GoldilocksField(3891000203325226477), + GoldilocksField(7114568565526390560), + ]), + u: QuinticExtension([ + GoldilocksField(3975610669088804605), + GoldilocksField(17233183788370721900), + GoldilocksField(3024806945190232867), + GoldilocksField(12024175965114418277), + GoldilocksField(16612390798970961761), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16856493815994067196), + GoldilocksField(14785817845078013270), + GoldilocksField(2731707303337213832), + GoldilocksField(5700860421257465777), + GoldilocksField(16512159307408461032), + ]), + u: QuinticExtension([ + GoldilocksField(15635761722509813626), + 
GoldilocksField(9688917495669656811), + GoldilocksField(13645873987197106712), + GoldilocksField(11818012498378673433), + GoldilocksField(5396192277875563403), + ]), + }, +]; +pub(crate) const MUL_TABLE_G200: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16579624480310836700), + GoldilocksField(4654009893788464381), + GoldilocksField(1879948550255376688), + GoldilocksField(7165778526988411257), + GoldilocksField(10027404176497435516), + ]), + u: QuinticExtension([ + GoldilocksField(14672132787094789329), + GoldilocksField(8134912716135063128), + GoldilocksField(1681926888624011127), + GoldilocksField(3090601642585073427), + GoldilocksField(5082367180675620723), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12935460095664666921), + GoldilocksField(13688862829955708769), + GoldilocksField(9016000768964455819), + GoldilocksField(18074632734324577885), + GoldilocksField(10067423399607639746), + ]), + u: QuinticExtension([ + GoldilocksField(1891495241569347963), + GoldilocksField(9444474092325495302), + GoldilocksField(10783243003245947999), + GoldilocksField(15112298120192081012), + GoldilocksField(8489851093422035711), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(3971319576308477519), + GoldilocksField(11846884681248592704), + GoldilocksField(7252181329490053993), + GoldilocksField(2837725043423724233), + GoldilocksField(554712018738433440), + ]), + u: QuinticExtension([ + GoldilocksField(2283716927456626486), + GoldilocksField(4678849096118793201), + GoldilocksField(7064207072633614681), + GoldilocksField(1293928013652227803), + GoldilocksField(4122458298059420843), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17348512312529533060), + GoldilocksField(8639109793604178953), + GoldilocksField(15818212126524653050), + GoldilocksField(17950462779586277033), + GoldilocksField(15262813331997381824), + ]), + u: QuinticExtension([ + GoldilocksField(5107077498491622223), + GoldilocksField(4004992081347212098), + GoldilocksField(13489481871700798330), + GoldilocksField(1439663511274537768), + GoldilocksField(2916087242841422420), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6922005184759837266), + GoldilocksField(4816940458101140735), + GoldilocksField(14348527682434315092), + GoldilocksField(16536303157525555702), + GoldilocksField(8297623336197847715), + ]), + u: QuinticExtension([ + GoldilocksField(3550074682900750241), + GoldilocksField(5900089539068431592), + GoldilocksField(15881485277116367548), + GoldilocksField(4494234475833006435), + GoldilocksField(698663099382505402), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16241061472728124620), + GoldilocksField(6131873802153215223), + GoldilocksField(16831672901554690917), + GoldilocksField(17254464099918200920), + GoldilocksField(11185076059758094886), + ]), + u: QuinticExtension([ + GoldilocksField(4298246016297961963), + GoldilocksField(7189403662133590696), + GoldilocksField(9418905817123278198), + GoldilocksField(14531204622533844239), + GoldilocksField(8541862381303549676), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(15447868881789362057), + GoldilocksField(5508196216379753233), + GoldilocksField(16485811425555940264), + GoldilocksField(13222911319795183488), + GoldilocksField(3159840448703036170), + ]), + u: QuinticExtension([ + GoldilocksField(5204165561238044016), + GoldilocksField(13468232899848292870), + GoldilocksField(17191293205041837891), + 
GoldilocksField(18246478932776430841), + GoldilocksField(15325962678017236259), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(10621030779181154903), + GoldilocksField(17136235063952920846), + GoldilocksField(9314218722796245020), + GoldilocksField(9368391401101022200), + GoldilocksField(8555825846071793318), + ]), + u: QuinticExtension([ + GoldilocksField(7664574944617879289), + GoldilocksField(16231709863136830941), + GoldilocksField(6590015398331881523), + GoldilocksField(14621275666438800255), + GoldilocksField(6788090867699016859), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(720425677629731910), + GoldilocksField(558256443318656554), + GoldilocksField(5541446382714187419), + GoldilocksField(16510886437312776757), + GoldilocksField(10390794562653129460), + ]), + u: QuinticExtension([ + GoldilocksField(13345062980903998097), + GoldilocksField(13515598458298192134), + GoldilocksField(6777126340206327673), + GoldilocksField(14815170113495224049), + GoldilocksField(1808065573577174046), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8512864041521600225), + GoldilocksField(13632121416118897300), + GoldilocksField(15219847883797542830), + GoldilocksField(6281672652132756722), + GoldilocksField(12690075810246041331), + ]), + u: QuinticExtension([ + GoldilocksField(15240394429738581893), + GoldilocksField(13526765963383505570), + GoldilocksField(385005971031338975), + GoldilocksField(17055827423572264183), + GoldilocksField(9799789282342151082), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16433318271062727305), + GoldilocksField(1053347735296699652), + GoldilocksField(5450980641165850759), + GoldilocksField(4054013846223550412), + GoldilocksField(2947006303368928546), + ]), + u: QuinticExtension([ + GoldilocksField(17928731966205396993), + GoldilocksField(17380491217072802345), + GoldilocksField(11249617314541463800), + GoldilocksField(13746866206588898967), + GoldilocksField(12571294391280109436), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(18390535763930288817), + GoldilocksField(13956792141128102015), + GoldilocksField(14894537812189538984), + GoldilocksField(5333289079328326940), + GoldilocksField(10076243009389690036), + ]), + u: QuinticExtension([ + GoldilocksField(8250298621563594676), + GoldilocksField(5752869300366626776), + GoldilocksField(3645497280270257308), + GoldilocksField(12900372348275640100), + GoldilocksField(16885169851778635393), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(1544673605232492218), + GoldilocksField(661976301925838846), + GoldilocksField(18424167365360574137), + GoldilocksField(3246102959607711481), + GoldilocksField(5211114398364569488), + ]), + u: QuinticExtension([ + GoldilocksField(4460870711092798561), + GoldilocksField(3451028986412879783), + GoldilocksField(17189436277480328087), + GoldilocksField(16695916816719405476), + GoldilocksField(712205578119358045), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6293897706165296494), + GoldilocksField(1058852550215266328), + GoldilocksField(5340568372786241917), + GoldilocksField(5059226109913370799), + GoldilocksField(3526604109990729657), + ]), + u: QuinticExtension([ + GoldilocksField(11157536197710362632), + GoldilocksField(12986275077072906620), + GoldilocksField(3545776948579292831), + GoldilocksField(11785840473114906984), + GoldilocksField(10099190834060857641), + ]), + }, + AffinePoint { + x: QuinticExtension([ + 
GoldilocksField(11775056308366361359), + GoldilocksField(797408970642010187), + GoldilocksField(11281697728680415953), + GoldilocksField(15083731163311144943), + GoldilocksField(2591402698173474283), + ]), + u: QuinticExtension([ + GoldilocksField(11008763107345506753), + GoldilocksField(5488815957510229275), + GoldilocksField(14952094509887379098), + GoldilocksField(11189563823429936956), + GoldilocksField(5358872537390699328), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(7693346203646808522), + GoldilocksField(3196464325266151639), + GoldilocksField(1785222888514983133), + GoldilocksField(4961084796993397121), + GoldilocksField(16651160545753804914), + ]), + u: QuinticExtension([ + GoldilocksField(12113910774037980879), + GoldilocksField(7934748119329239619), + GoldilocksField(14520318444063438710), + GoldilocksField(1372113091606068548), + GoldilocksField(11259415352488711270), + ]), + }, +]; +pub(crate) const MUL_TABLE_G240: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12150973993870501418), + GoldilocksField(4223924024756880744), + GoldilocksField(12164602482423882598), + GoldilocksField(10110827219574637558), + GoldilocksField(7454721448521923322), + ]), + u: QuinticExtension([ + GoldilocksField(8223067178251187472), + GoldilocksField(14791411048736217143), + GoldilocksField(6548050514357003677), + GoldilocksField(14101051606185056042), + GoldilocksField(9723051335063761713), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(2309936888761803695), + GoldilocksField(10528374492977918782), + GoldilocksField(2909110930817727979), + GoldilocksField(14140458781369438628), + GoldilocksField(14608954252678341690), + ]), + u: QuinticExtension([ + GoldilocksField(13315057417082143829), + GoldilocksField(2875970576192442492), + GoldilocksField(10204753160271556880), + GoldilocksField(2528165599636440836), + GoldilocksField(15588626368559095887), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(10285645489096016828), + GoldilocksField(6826665230734386908), + GoldilocksField(3643430412114742539), + GoldilocksField(3525069461824492670), + GoldilocksField(9265259914130088255), + ]), + u: QuinticExtension([ + GoldilocksField(18429224257556970829), + GoldilocksField(16335577406386351411), + GoldilocksField(1444816108348712587), + GoldilocksField(532410028340092104), + GoldilocksField(16527851406835121471), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(5704484539924172300), + GoldilocksField(7404153046704669007), + GoldilocksField(10464550607197363861), + GoldilocksField(6247473471694475226), + GoldilocksField(5115259736150878416), + ]), + u: QuinticExtension([ + GoldilocksField(17579790539786983406), + GoldilocksField(2637134544147945869), + GoldilocksField(4816977865203371123), + GoldilocksField(248089872468508433), + GoldilocksField(4531777203898089043), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(4467537824888613905), + GoldilocksField(558399825893683724), + GoldilocksField(4701759200819649961), + GoldilocksField(16655886253669319016), + GoldilocksField(14976096788667951951), + ]), + u: QuinticExtension([ + GoldilocksField(4542754722443867895), + GoldilocksField(17838455475085664297), + GoldilocksField(3856006738985174470), + GoldilocksField(3695500756395218282), + GoldilocksField(10605666420204608788), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9600870350287519277), + GoldilocksField(2943222643218798300), + 
GoldilocksField(7504369701746722369), + GoldilocksField(3618345531898965921), + GoldilocksField(7996994629944741723), + ]), + u: QuinticExtension([ + GoldilocksField(1279526320710392206), + GoldilocksField(4757632433269403318), + GoldilocksField(12420546729136568420), + GoldilocksField(17056471951401952929), + GoldilocksField(16063059997803195687), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(38797529778910718), + GoldilocksField(5399283910211167400), + GoldilocksField(14916560969855131779), + GoldilocksField(682297961769392616), + GoldilocksField(18182112167162978281), + ]), + u: QuinticExtension([ + GoldilocksField(4851072938181616220), + GoldilocksField(1768095373600786914), + GoldilocksField(16165351733290258071), + GoldilocksField(16718741168144865753), + GoldilocksField(3387411250674432260), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(2078420199699477832), + GoldilocksField(8841464556389390192), + GoldilocksField(11642290600245563238), + GoldilocksField(3963148268515541490), + GoldilocksField(12483849286604430921), + ]), + u: QuinticExtension([ + GoldilocksField(13785261307443076347), + GoldilocksField(8468941646155066103), + GoldilocksField(3174688882704239544), + GoldilocksField(10801045692115252746), + GoldilocksField(12996585990193072559), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9988106987444016195), + GoldilocksField(550486750301142863), + GoldilocksField(16147691900152849957), + GoldilocksField(11482331300775839937), + GoldilocksField(18210139443246491531), + ]), + u: QuinticExtension([ + GoldilocksField(17280645777130171058), + GoldilocksField(5143971509316066734), + GoldilocksField(9444564929039929588), + GoldilocksField(2353260944176421839), + GoldilocksField(7465399806142043858), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6553362623203065460), + GoldilocksField(17774755178623960848), + GoldilocksField(8170189958890581873), + GoldilocksField(16479723056180470829), + GoldilocksField(9052786989344840129), + ]), + u: QuinticExtension([ + GoldilocksField(5051652642644768336), + GoldilocksField(8142249998939619774), + GoldilocksField(6620402268383223033), + GoldilocksField(15441849186338064088), + GoldilocksField(17835312998647746744), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(12874799067286536937), + GoldilocksField(5111767032451732361), + GoldilocksField(9488358619465533651), + GoldilocksField(2298827191559954790), + GoldilocksField(15515015915136216385), + ]), + u: QuinticExtension([ + GoldilocksField(9934501897778259341), + GoldilocksField(4862857445330881324), + GoldilocksField(7191492445992175174), + GoldilocksField(12588576141673201363), + GoldilocksField(16820074689985814838), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(5506619209793629300), + GoldilocksField(7913282297782618980), + GoldilocksField(17035818002685942678), + GoldilocksField(12219904669712698298), + GoldilocksField(4701091471601382843), + ]), + u: QuinticExtension([ + GoldilocksField(15628068501760254685), + GoldilocksField(9969915731376118609), + GoldilocksField(4006095342913065224), + GoldilocksField(11418313546696146922), + GoldilocksField(9535581122323707943), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(660873376897289838), + GoldilocksField(2332132165476400730), + GoldilocksField(4904481730668053625), + GoldilocksField(17592889807182765803), + GoldilocksField(1775714498923493702), + ]), + u: QuinticExtension([ + 
GoldilocksField(8278491921012401650), + GoldilocksField(255948487882786297), + GoldilocksField(18072518402211877989), + GoldilocksField(5587324201809627359), + GoldilocksField(7916932786454127987), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(14880598518989845991), + GoldilocksField(207047178807534206), + GoldilocksField(8949411863433895830), + GoldilocksField(15987292979823109393), + GoldilocksField(10450748763888590480), + ]), + u: QuinticExtension([ + GoldilocksField(10555084898033032496), + GoldilocksField(11149020781750632904), + GoldilocksField(12754167684588738056), + GoldilocksField(6203699237453069783), + GoldilocksField(8397897173241663238), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9626581523142877484), + GoldilocksField(1014282284030781401), + GoldilocksField(12559760948477539740), + GoldilocksField(1719475860010180104), + GoldilocksField(12167893974497751844), + ]), + u: QuinticExtension([ + GoldilocksField(10039328052268687164), + GoldilocksField(16635482332793119899), + GoldilocksField(5022923182724434224), + GoldilocksField(13591886545913812687), + GoldilocksField(4895263026932926029), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(15155727891419849945), + GoldilocksField(3461317531761180793), + GoldilocksField(16979908481664688625), + GoldilocksField(15684045230954038314), + GoldilocksField(12877152996799011381), + ]), + u: QuinticExtension([ + GoldilocksField(10120254644770986491), + GoldilocksField(12192410531100649784), + GoldilocksField(10938806981692604655), + GoldilocksField(12172717977579895996), + GoldilocksField(4275232645621155364), + ]), + }, +]; +pub(crate) const MUL_TABLE_G280: [AffinePoint; 16] = [ + AffinePoint { + x: QuinticExtension([ + GoldilocksField(16213503882573976174), + GoldilocksField(17168097236575729409), + GoldilocksField(5196518270315815888), + GoldilocksField(11117797779066091728), + GoldilocksField(8133486084301919302), + ]), + u: QuinticExtension([ + GoldilocksField(11377245759937335205), + GoldilocksField(4469833894127669069), + GoldilocksField(9013706759438268290), + GoldilocksField(1420430480105358672), + GoldilocksField(16254559763550257786), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6770577214447522289), + GoldilocksField(17115295583004483100), + GoldilocksField(6277430884428490111), + GoldilocksField(9367148506334403125), + GoldilocksField(6974282321669735675), + ]), + u: QuinticExtension([ + GoldilocksField(1707192586757379005), + GoldilocksField(16379422115255719397), + GoldilocksField(9601719515238438547), + GoldilocksField(18382556734962308004), + GoldilocksField(9816132397810204232), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(9294285004075832264), + GoldilocksField(17130662520620891577), + GoldilocksField(17784936778486874242), + GoldilocksField(7903567741417559125), + GoldilocksField(13438250367827046909), + ]), + u: QuinticExtension([ + GoldilocksField(14848083910737694210), + GoldilocksField(3876659422633582058), + GoldilocksField(17589777829179053297), + GoldilocksField(13255998440838131261), + GoldilocksField(16836576774480954338), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6484667483670905518), + GoldilocksField(14676664710267712890), + GoldilocksField(943902085975544717), + GoldilocksField(9447580128743448969), + GoldilocksField(16970743407772865788), + ]), + u: QuinticExtension([ + GoldilocksField(6954165327706188094), + GoldilocksField(8649474865423322710), + 
GoldilocksField(2874401123529251159), + GoldilocksField(6791369587301962541), + GoldilocksField(4682935506184263557), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17851907028754343206), + GoldilocksField(804547578244465260), + GoldilocksField(9836036663990543574), + GoldilocksField(2782503787318970554), + GoldilocksField(11029394870653732940), + ]), + u: QuinticExtension([ + GoldilocksField(17369554270592567524), + GoldilocksField(11522732789192066880), + GoldilocksField(10532626382274872331), + GoldilocksField(15084091109637533903), + GoldilocksField(12335999220635744679), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(17927819688989708837), + GoldilocksField(988383065726890993), + GoldilocksField(17134368434216800793), + GoldilocksField(3745722005614877274), + GoldilocksField(12079981168859675058), + ]), + u: QuinticExtension([ + GoldilocksField(16726774574446090464), + GoldilocksField(16696890676634414315), + GoldilocksField(1768034342698142990), + GoldilocksField(5182686366441226421), + GoldilocksField(12905524404643926664), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(5238880119446176935), + GoldilocksField(7632489756740258264), + GoldilocksField(13186772342659187888), + GoldilocksField(5373269322406587505), + GoldilocksField(4770529397079489612), + ]), + u: QuinticExtension([ + GoldilocksField(906031890843250730), + GoldilocksField(2524575321869066878), + GoldilocksField(1749353240118753004), + GoldilocksField(8401611932919350607), + GoldilocksField(13809067453022178888), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(3463236991239759171), + GoldilocksField(9418504154128111760), + GoldilocksField(16646612147210445285), + GoldilocksField(8048290712504722073), + GoldilocksField(4003118648580238214), + ]), + u: QuinticExtension([ + GoldilocksField(565998296113403270), + GoldilocksField(5639331094891259297), + GoldilocksField(3505572540820256764), + GoldilocksField(828191569017542887), + GoldilocksField(2857618747433407780), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(10579753460617358122), + GoldilocksField(1425995342210623751), + GoldilocksField(4437515648943912607), + GoldilocksField(9208066594954079254), + GoldilocksField(8133603054721359271), + ]), + u: QuinticExtension([ + GoldilocksField(5608659290599426924), + GoldilocksField(14668957524891276508), + GoldilocksField(3526636595086144132), + GoldilocksField(16193609694652077957), + GoldilocksField(14814479961293040846), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(6518242324859070685), + GoldilocksField(10266975352945110329), + GoldilocksField(7108371607348108785), + GoldilocksField(14640665666349949967), + GoldilocksField(6626682978649692287), + ]), + u: QuinticExtension([ + GoldilocksField(14497090493935686223), + GoldilocksField(2359285113881335421), + GoldilocksField(8857893277062885351), + GoldilocksField(17134727430531764861), + GoldilocksField(2965117089847599750), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(3234104227668768457), + GoldilocksField(3225149987207949460), + GoldilocksField(7086354559915607869), + GoldilocksField(12877313486906605274), + GoldilocksField(11561630661277747287), + ]), + u: QuinticExtension([ + GoldilocksField(2820816680951282830), + GoldilocksField(16276533358496276797), + GoldilocksField(8457579751747007027), + GoldilocksField(227223614176367695), + GoldilocksField(9322033005858872072), + ]), + }, + AffinePoint { + x: 
QuinticExtension([ + GoldilocksField(9250294019343021345), + GoldilocksField(1766812231170355307), + GoldilocksField(13463388795950836322), + GoldilocksField(13360210575495818786), + GoldilocksField(8017191608807726449), + ]), + u: QuinticExtension([ + GoldilocksField(2255099676665350867), + GoldilocksField(9521276765352132752), + GoldilocksField(2720865867697005972), + GoldilocksField(11524331848149102745), + GoldilocksField(1853382383896073031), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(2978539147748114010), + GoldilocksField(8802332783321234516), + GoldilocksField(8411186022628666251), + GoldilocksField(8965939348601447933), + GoldilocksField(114587463394660272), + ]), + u: QuinticExtension([ + GoldilocksField(10264182682530549009), + GoldilocksField(9417221851288332824), + GoldilocksField(6749125336792502108), + GoldilocksField(5965179916047598634), + GoldilocksField(5374111552073601171), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(11268491403320804445), + GoldilocksField(3223633055331054259), + GoldilocksField(4794616428042030393), + GoldilocksField(9408476598474014080), + GoldilocksField(9670252162084106811), + ]), + u: QuinticExtension([ + GoldilocksField(513179742804655698), + GoldilocksField(6696701674770108433), + GoldilocksField(15069423489583433755), + GoldilocksField(8503371514376466366), + GoldilocksField(9365587857178664019), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(8819593048375407845), + GoldilocksField(6714540590843281274), + GoldilocksField(12089230224772664935), + GoldilocksField(17126824728760033332), + GoldilocksField(15606471085374753320), + ]), + u: QuinticExtension([ + GoldilocksField(18421631694011443474), + GoldilocksField(14887724257413490347), + GoldilocksField(16780540965430713114), + GoldilocksField(10821826807107857648), + GoldilocksField(6670989693469822701), + ]), + }, + AffinePoint { + x: QuinticExtension([ + GoldilocksField(3738402919300376943), + GoldilocksField(3839435231606872366), + GoldilocksField(9950700849677152681), + GoldilocksField(13599115104880397736), + GoldilocksField(8170114904882828684), + ]), + u: QuinticExtension([ + GoldilocksField(17403178918799426162), + GoldilocksField(12276995036775716338), + GoldilocksField(135085417294263740), + GoldilocksField(6813815177605214808), + GoldilocksField(15873392253765702165), + ]), + }, +]; + +// TODO: separate this into a proper script +#[cfg(test)] +mod tests { + use super::*; + use crate::curve::{curve::Point, scalar_field::Scalar}; + use num::{BigUint, FromPrimitive}; + use plonky2_field::types::{Field, PrimeField64}; + + // For k = 40*j (j = 0 to 7), constant Gk[] is an array of 16 points in + // affine coordinates, with Gk[i] = (i+1)*(2^k)*G for the conventional + // generator point G. 
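    // A minimal spot-check sketch of that definition: recompute the k = 40
    // table with `compute_table` below and compare it coordinate-wise against
    // the constants above. This sketch assumes `QuinticExtension` exposes
    // `PartialEq`/`Debug` (as it does in plonky2).
    #[test]
    fn mul_table_g40_matches_definition() {
        let recomputed = compute_table(1); // j = 1, i.e. k = 40
        for i in 0..16 {
            assert_eq!(recomputed[i].x, MUL_TABLE_G40[i].x);
            assert_eq!(recomputed[i].u, MUL_TABLE_G40[i].u);
        }
    }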
+ + fn compute_table(j: u64) -> [AffinePoint; 16] { + let k = 40 * j; + let mut table = [Point::NEUTRAL; 16]; + + for i in 0..16 { + let s_biguint = + (BigUint::from_u64(1).unwrap() << k) * BigUint::from_usize(i + 1).unwrap(); + let s = Scalar::from_noncanonical_biguint(s_biguint); + table[i] = Point::GENERATOR * s; + } + + let mut res = [AffinePoint::NEUTRAL; 16]; + res.copy_from_slice(&Point::batch_to_affine(&table)); + + res + } + + fn print_table(table: &[AffinePoint; 16], name: &str) { + println!("pub(crate) const {}: [AffinePoint; 16] = [", name); + + for i in 0..table.len() { + let x_limbs = table[i] + .x + .0 + .map(|x| format!("GoldilocksField({})", x.to_canonical_u64())) + .join(", "); + let u_limbs = table[i] + .u + .0 + .map(|u| format!("GoldilocksField({})", u.to_canonical_u64())) + .join(", "); + + println!( + " AffinePoint {{ x: QuinticExtension([{}]), u: QuinticExtension([{}]) }},", + x_limbs, u_limbs + ); + } + + println!("];"); + } + + #[ignore] + #[test] + fn print_mul_table() { + for j in 0..8 { + let table = compute_table(j); + print_table(&table, &format!("MUL_TABLE_G{}", j * 40)); + } + + panic!(); + } +} diff --git a/ecgfp5/src/curve/scalar_field.rs b/ecgfp5/src/curve/scalar_field.rs new file mode 100644 index 0000000000..d5567f758d --- /dev/null +++ b/ecgfp5/src/curve/scalar_field.rs @@ -0,0 +1,1152 @@ +/// Scalar Field Implementation +/// Most of this is copied from Thomas Pornin's implementation +/// with some modifications to make it play more nicely with plonky2 primitives +/// His implementation can be found here: https://github.com/pornin/ecgfp5 +use alloc::vec::Vec; +use core::{ + fmt::{self, Debug, Display, Formatter}, + hash::{Hash, Hasher}, + iter::{Product, Sum}, + ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, +}; +use plonky2_field::extension::quintic::QuinticExtension; +use rand::RngCore; + +use itertools::Itertools; +use num::{bigint::BigUint, One}; +use serde::{Deserialize, Serialize}; + +use plonky2_field::types::{Field, PrimeField, PrimeField64, Sample}; + +use super::GFp5; + +/// The Scalar field of the ECgFP5 elliptic curve. 
+///
+/// Its order is
+/// ```ignore
+/// P = 1067993516717146951041484916571792702745057740581727230159139685185762082554198619328292418486241
+/// ```
+#[derive(Copy, Clone, Serialize, Deserialize, PartialOrd, Ord)]
+pub struct Scalar(pub [u64; 5]);
+
+impl Default for Scalar {
+    fn default() -> Self {
+        Self::ZERO
+    }
+}
+
+impl PartialEq for Scalar {
+    fn eq(&self, other: &Self) -> bool {
+        self.to_canonical_biguint() == other.to_canonical_biguint()
+    }
+}
+
+impl Eq for Scalar {}
+
+impl Hash for Scalar {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.to_canonical_biguint().hash(state)
+    }
+}
+
+impl Display for Scalar {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        Display::fmt(&self.to_canonical_biguint(), f)
+    }
+}
+
+impl Debug for Scalar {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        Debug::fmt(&self.to_canonical_biguint(), f)
+    }
+}
+
+impl Sample for Scalar {
+    #[inline]
+    fn sample<R>(rng: &mut R) -> Self
+    where
+        R: RngCore + ?Sized,
+    {
+        use num::bigint::RandBigInt;
+        Self::from_noncanonical_biguint(rng.gen_biguint_below(&Self::order()))
+    }
+}
+
+impl Add for Scalar {
+    type Output = Scalar;
+
+    fn add(self, other: Scalar) -> Scalar {
+        Scalar::add(self, other)
+    }
+}
+
+impl AddAssign for Scalar {
+    fn add_assign(&mut self, other: Scalar) {
+        *self = Scalar::add(*self, other)
+    }
+}
+
+impl Sub for Scalar {
+    type Output = Scalar;
+
+    fn sub(self, other: Scalar) -> Scalar {
+        Scalar::sub(self, other)
+    }
+}
+
+impl SubAssign for Scalar {
+    fn sub_assign(&mut self, other: Scalar) {
+        *self = Scalar::sub(*self, other)
+    }
+}
+
+impl Neg for Scalar {
+    type Output = Scalar;
+
+    fn neg(self) -> Scalar {
+        Scalar::neg(self)
+    }
+}
+
+impl Mul for Scalar {
+    type Output = Scalar;
+
+    fn mul(self, other: Scalar) -> Scalar {
+        Scalar::mul(self, other)
+    }
+}
+
+impl MulAssign for Scalar {
+    fn mul_assign(&mut self, other: Scalar) {
+        *self = Scalar::mul(*self, other)
+    }
+}
+
+impl Div for Scalar {
+    type Output = Scalar;
+
+    fn div(self, other: Scalar) -> Scalar {
+        self * other.inverse()
+    }
+}
+
+impl DivAssign for Scalar {
+    fn div_assign(&mut self, other: Scalar) {
+        *self = *self * other.inverse()
+    }
+}
+
+impl Sum for Scalar {
+    fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
+        iter.fold(Scalar::ZERO, |acc, x| acc + x)
+    }
+}
+
+impl Product for Scalar {
+    fn product<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
+        iter.fold(Scalar::ONE, |acc, x| acc * x)
+    }
+}
+
+impl Field for Scalar {
+    const ZERO: Self = Self([0; 5]);
+    const ONE: Self = Self([1, 0, 0, 0, 0]);
+    const TWO: Self = Self([2, 0, 0, 0, 0]);
+    const NEG_ONE: Self = Self([
+        0xE80FD996_948BFFE0,
+        0xE8885C39_D724A09C,
+        0x7FFFFFE6_CFB80639,
+        0x7FFFFFF1_00000016,
+        0x7FFFFFFD_80000007,
+    ]);
+
+    const TWO_ADICITY: usize = 5;
+    const CHARACTERISTIC_TWO_ADICITY: usize = Self::TWO_ADICITY;
+
+    // Sage: `g = GF(p).multiplicative_generator()`
+    const MULTIPLICATIVE_GROUP_GENERATOR: Self = Self([6, 0, 0, 0, 0]);
+
+    // Sage: `g_2 = power_mod(g, (p - 1) // 2^5, p)`
+    const POWER_OF_TWO_GENERATOR: Self = Self([
+        0xCC13C747_343470DD,
+        0xC09E9EF2_B5CC8610,
+        0xBE95D8B7_B2143AF3,
+        0xA774D847_A1C95ECC,
+        0x6037242B_8FA79C31,
+    ]);
+
+    const BITS: usize = 320;
+
+    fn order() -> BigUint {
+        BigUint::from_slice(&[
+            0x948BFFE1, 0xE80FD996, 0xD724A09C, 0xE8885C39, 0xCFB80639, 0x7FFFFFE6, 0x00000016,
+            0x7FFFFFF1, 0x80000007, 0x7FFFFFFD,
+        ])
+    }
+    fn characteristic() -> BigUint {
+        Self::order()
+    }
+
+    fn try_inverse(&self) -> Option<Self> {
+        if self.is_zero() {
+            return None;
+        }
+
+        // Fermat's Little Theorem
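+        // Since n is prime, Fermat's little theorem gives a^(n-1) = 1 for any
+        // nonzero a, hence a^(n-2) = a^(-1); the exponent below is order() - 2.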
+        Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
+    }
+
+    fn from_noncanonical_biguint(val: BigUint) -> Self {
+        let val = val % Self::order();
+        Self(
+            val.to_u64_digits().into_iter().pad_using(5, |_| 0).collect::<Vec<u64>>()[..]
+                .try_into()
+                .expect("error converting to u64 array"),
+        )
+    }
+
+    #[inline]
+    fn from_canonical_u64(n: u64) -> Self {
+        Self([n, 0, 0, 0, 0])
+    }
+
+    #[inline]
+    fn from_noncanonical_u128(n: u128) -> Self {
+        Self([n as u64, (n >> 64) as u64, 0, 0, 0])
+    }
+
+    #[inline]
+    fn from_noncanonical_u96(n: (u64, u32)) -> Self {
+        Self([n.0, n.1 as u64, 0, 0, 0])
+    }
+
+    #[inline]
+    fn from_noncanonical_i64(n: i64) -> Self {
+        Self::from_canonical_u64(n as u64)
+    }
+
+    #[inline]
+    fn from_noncanonical_u64(n: u64) -> Self {
+        Self::from_canonical_u64(n)
+    }
+}
+
+impl PrimeField for Scalar {
+    fn to_canonical_biguint(&self) -> BigUint {
+        let mut result = biguint_from_array(self.0);
+        if result >= Self::order() {
+            result -= Self::order();
+        }
+        result
+    }
+}
+
+pub(crate) fn biguint_from_array(arr: [u64; 5]) -> BigUint {
+    BigUint::from_slice(&[
+        arr[0] as u32,
+        (arr[0] >> 32) as u32,
+        arr[1] as u32,
+        (arr[1] >> 32) as u32,
+        arr[2] as u32,
+        (arr[2] >> 32) as u32,
+        arr[3] as u32,
+        (arr[3] >> 32) as u32,
+        arr[4] as u32,
+        (arr[4] >> 32) as u32,
+    ])
+}
+
+// these methods are mostly copied from Thomas Pornin's implementation
+// (https://github.com/pornin/ecgfp5)
+impl Scalar {
+    // IMPLEMENTATION NOTES:
+    // ---------------------
+    //
+    // Group order n is slightly below 2^319. We store values over five
+    // 64-bit limbs. We use Montgomery multiplication to perform
+    // computations; however, we keep the limbs in normal
+    // (non-Montgomery) representation, so that operations that do not
+    // require any multiplication of scalars, just encoding and
+    // decoding, are fastest.
+
+    // The modulus itself, stored in a Scalar structure (which
+    // contravenes the rules of a Scalar; this constant MUST NOT leak
+    // outside the API).
+    const N: Self = Self([
+        0xE80FD996948BFFE1,
+        0xE8885C39D724A09C,
+        0x7FFFFFE6CFB80639,
+        0x7FFFFFF100000016,
+        0x7FFFFFFD80000007,
+    ]);
+
+    // -1/N[0] mod 2^64
+    const N0I: u64 = 0xD78BEF72057B7BDF;
+
+    /* not used
+    // 2^320 mod n.
+    const R: Self = Self([
+        0x2FE04CD2D6E8003E,
+        0x2EEF478C51B6BEC6,
+        0x00000032608FF38C,
+        0x0000001DFFFFFFD3,
+        0x00000004FFFFFFF1,
+    ]);
+    */
+
+    // 2^640 mod n.
+    const R2: Self = Self([
+        0xA01001DCE33DC739,
+        0x6C3228D33F62ACCF,
+        0xD1D796CC91CF8525,
+        0xAADFFF5D1574C1D8,
+        0x4ACA13B28CA251F5,
+    ]);
+
+    // 2^632 mod n.
+    const T632: Self = Self([
+        0x2B0266F317CA91B3,
+        0xEC1D26528E984773,
+        0x8651D7865E12DB94,
+        0xDA2ADFF5941574D0,
+        0x53CACA12110CA256,
+    ]);
+
+    // raw addition (no reduction)
+    fn add_inner(self, a: Self) -> Self {
+        let mut r = Self::ZERO;
+        let mut c: u64 = 0;
+        for i in 0..5 {
+            let z = (self.0[i] as u128).wrapping_add(a.0[i] as u128).wrapping_add(c as u128);
+            r.0[i] = z as u64;
+            c = (z >> 64) as u64;
+        }
+        // no extra carry, since inputs are supposed to fit on 319 bits.
+        r
+    }
+
+    // raw subtraction (no reduction)
+    // Final borrow is returned (0xFFFFFFFFFFFFFFFF if borrow, 0 otherwise).
+    fn sub_inner(self, a: Self) -> (Self, u64) {
+        let mut r = Self::ZERO;
+        let mut c: u64 = 0;
+        for i in 0..5 {
+            let z = (self.0[i] as u128).wrapping_sub(a.0[i] as u128).wrapping_sub(c as u128);
+            r.0[i] = z as u64;
+            c = ((z >> 64) as u64) & 1;
+        }
+        (r, c.wrapping_neg())
+    }
+
+    /// If c == 0, return a0.
+    /// If c == 0xFFFFFFFFFFFFFFFF, return a1.
+    /// c MUST be equal to 0 or 0xFFFFFFFFFFFFFFFF.
+ pub fn select(c: u64, a0: Self, a1: Self) -> Self { + let mut r = Self::ZERO; + for i in 0..5 { + r.0[i] = a0.0[i] ^ (c & (a0.0[i] ^ a1.0[i])); + } + r + } + + // Scalar addition. + fn add(self, rhs: Self) -> Self { + let r0 = self.add_inner(rhs); + let (r1, c) = r0.sub_inner(Self::N); + Self::select(c, r1, r0) + } + + // Scalar subtraction. + fn sub(self, rhs: Self) -> Self { + let (r0, c) = self.sub_inner(rhs); + let r1 = r0.add_inner(Self::N); + Self::select(c, r0, r1) + } + + // Scalar negation. + fn neg(self) -> Self { + Self::ZERO.sub(self) + } + + // Montgomery multiplication. + // Returns (self*rhs)/2^320 mod n. + // 'self' MUST be less than n (the other operand can be up to 2^320-1). + fn montymul(self, rhs: Self) -> Self { + let mut r = Self::ZERO; + for i in 0..5 { + // Iteration i computes r <- (r + self*rhs_i + f*n)/2^64. + // Factor f is at most 2^64-1 and set so that the division + // is exact. + // On input: + // r <= 2^320 - 1 + // self <= n - 1 + // rhs_i <= 2^64 - 1 + // f <= 2^64 - 1 + // Therefore: + // r + self*rhs_i + f*n <= 2^320-1 + (2^64 - 1) * (n - 1) + // + (2^64 - 1) * n + // < 2^384 + // Thus, the new r fits on 320 bits. + let m = rhs.0[i]; + let f = self.0[0].wrapping_mul(m).wrapping_add(r.0[0]).wrapping_mul(Self::N0I); + let mut cc1: u64 = 0; + let mut cc2: u64 = 0; + for j in 0..5 { + let mut z = (self.0[j] as u128) + .wrapping_mul(m as u128) + .wrapping_add(r.0[j] as u128) + .wrapping_add(cc1 as u128); + cc1 = (z >> 64) as u64; + z = (f as u128) + .wrapping_mul(Self::N.0[j] as u128) + .wrapping_add((z as u64) as u128) + .wrapping_add(cc2 as u128); + cc2 = (z >> 64) as u64; + if j > 0 { + r.0[j - 1] = z as u64; + } + } + // No overflow here since the new r fits on 320 bits. + r.0[4] = cc1.wrapping_add(cc2); + } + + // We computed (self*rhs + ff*n) / 2^320, with: + // self < n + // rhs < 2^320 + // ff < 2^320 + // Thus, the value we obtained is lower than 2*n. Subtracting n + // once (conditionally) is sufficient to achieve full reduction. + let (r2, c) = r.sub_inner(Self::N); + Self::select(c, r2, r) + } + + fn mul(self, rhs: Self) -> Self { + self.montymul(Self::R2).montymul(rhs) + } + + /// Decode the provided byte slice into a scalar. The bytes are + /// interpreted into an integer in little-endian unsigned convention. + /// All slice bytes are read. Return value is (s, c): + /// - If the decoded integer is lower than the group order, then that + /// value is returned as s, and c == 0xFFFFFFFFFFFFFFFF. + /// - Otherwise, s is set to Scalar::ZERO, and c == 0. + pub fn try_from_noncanonical_bytes(buf: &[u8]) -> (Self, u64) { + let n = buf.len(); + let mut r = Self::ZERO; + let mut extra: u8 = 0; + for i in 0..n { + if i < 40 { + r.0[i >> 3] |= (buf[i] as u64).wrapping_shl(((i as u32) & 7) << 3); + } else { + extra |= buf[i]; + } + } + + // If input buffer is at most 39 bytes then the result is + // necessarily in range; we can skip the reduction tests. + if n <= 39 { + return (r, 0xFFFFFFFFFFFFFFFF); + } + + // Output is in the correct range if and only if extra == 0 and + // the value is lower than n. + let (_, mut c) = r.sub_inner(Self::N); + c &= ((extra as u64).wrapping_add(0xFF) >> 8).wrapping_sub(1); + for i in 0..5 { + r.0[i] &= c; + } + (r, c) + } + + pub fn from_gfp5(x: GFp5) -> Self { + let QuinticExtension(limbs) = x; + Self::from_noncanonical_biguint(biguint_from_array(limbs.map(|l| l.to_canonical_u64()))) + } + + /// Decode the provided byte slice into a scalar. 
The bytes are + /// interpreted into an integer in little-endian unsigned convention. + /// All slice bytes are read, and the value is REDUCED modulo n. This + /// function never fails; it accepts arbitrary input values. + pub fn from_noncanonical_bytes(buf: &[u8]) -> Self { + // We inject the value by chunks of 312 bits, in high-to-low + // order. We multiply by 2^312 the intermediate result, which + // is equivalent to performing a Montgomery multiplication + // by 2^632 mod n. + + // If buffer length is at most 39 bytes, then the plain try_from_noncanonical_bytes() + // function works. + let n = buf.len(); + if n <= 39 { + let (r, _) = Self::try_from_noncanonical_bytes(buf); + return r; + } + + // We can now assume that we have at least 40 bytes of input. + + // Compute k as a multiple of 39 such that n-39 <= k < n. Since + // n >= 40, this implies that k >= 1. We decode the top chunk + // (which has length _at most_ 39 bytes) into acc. + let mut k = ((n - 1) / 39) * 39; + let (mut acc, _) = Self::try_from_noncanonical_bytes(&buf[k..n]); + while k > 0 { + k -= 39; + let (b, _) = Self::try_from_noncanonical_bytes(&buf[k..k + 39]); + acc = acc.montymul(Self::T632).add(b); + } + acc + } + + /// Decode 40 bytes into a scalar. If it is larger than the group order, return None. + pub fn from_canonical_bytes(buf: [u8; 40]) -> Option { + let (scalar, carry) = Self::try_from_noncanonical_bytes(&buf); + if carry == 0xFFFFFFFFFFFFFFFF { + Some(scalar) + } else { + None + } + } + + /// Encode this scalar over exactly 40 bytes. + pub fn encode(self) -> [u8; 40] { + let mut r = [0u8; 40]; + for i in 0..5 { + r[8 * i..8 * i + 8].copy_from_slice(&self.0[i].to_le_bytes()); + } + r + } + + // Recode a scalar into signed integers. For a window width of w + // bits, returned integers are in the -(2^w-1) to +2^w range. The + // provided slice is filled; if w*len(ss) >= 320, then the output + // encodes the complete scalar value, and the top (last) signed + // integer is nonnegative. + // Window width MUST be between 2 and 10. + pub(crate) fn recode_signed(self, ss: &mut [i32], w: i32) { + Self::recode_signed_from_limbs(&self.0, ss, w); + } + + pub(crate) fn recode_signed_from_limbs(limbs: &[u64], ss: &mut [i32], w: i32) { + let mut acc: u64 = 0; + let mut acc_len: i32 = 0; + let mut j = 0; + let mw = (1u32 << w) - 1; + let hw = 1u32 << (w - 1); + let mut cc: u32 = 0; + for i in 0..ss.len() { + // Get next w-bit chunk in bb. + let mut bb: u32; + if acc_len < w { + if j < limbs.len() { + let nl = limbs[j]; + j += 1; + bb = ((acc | (nl << acc_len)) as u32) & mw; + acc = nl >> (w - acc_len); + } else { + bb = (acc as u32) & mw; + acc = 0; + } + acc_len += 64 - w; + } else { + bb = (acc as u32) & mw; + acc_len -= w; + acc >>= w; + } + + // If bb is greater than 2^(w-1), subtract 2^w and + // propagate a carry. + bb += cc; + cc = hw.wrapping_sub(bb) >> 31; + ss[i] = (bb as i32).wrapping_sub((cc << w) as i32); + } + } + + // Use Lagrange's algorithm to represent this scalar k as a + // pair (v0, v1) such that k = v0/v1 mod n. + // This function is NOT constant-time and should be used only on + // a non-secret scalar (e.g. as part of signature verification). 
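+    // The two returned components fit in signed 161-bit integers (see
+    // `Signed161` below), i.e. roughly half the scalar width, which is what
+    // makes the decomposition useful at verification time.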
+ pub fn lagrange(self) -> (Signed161, Signed161) { + // We use algorithm 4 from: https://eprint.iacr.org/2020/454 + + // Nu <- n^2 + // Nv <- k^2 + 1 + // sp <- n*k + let mut nu_buf = Signed640::from_nsquared(); + let mut nv_buf = Signed640::from_mul_scalars(self, self); + nv_buf.add1(); + let (mut nu, mut nv) = (&mut nu_buf, &mut nv_buf); + let mut sp = Signed640::from_mul_scalars(self, Self::N); + + // (u0, u1) <- (n, 0) + // (v0, v1) <- (k, 1) + let mut u0_buf = Signed161::from_scalar(Self::N); + let mut u1_buf = Signed161::from_scalar(Self::ZERO); + let mut v0_buf = Signed161::from_scalar(self); + let mut v1_buf = Signed161::from_scalar(Self::ONE); + let (mut u0, mut u1) = (&mut u0_buf, &mut u1_buf); + let (mut v0, mut v1) = (&mut v0_buf, &mut v1_buf); + + // Main loop. + loop { + // if u is smaller than v, then swap them. + if nu.lt_unsigned(nv) { + let tn = nu; + nu = nv; + nv = tn; + let (t0, t1) = (u0, u1); + u0 = v0; + u1 = v1; + v0 = t0; + v1 = t1; + } + + // if len(Nv) <= 320, then we are finished. + let vlen = nv.bitlength(); + if vlen <= 320 { + return (*v0, *v1); + } + + // shift count s = max(0, len(p) - len(Nv)) + let mut s = sp.bitlength() - vlen; + if s < 0 { + s = 0; + } + + if sp.is_nonnegative() { + u0.sub_shifted(v0, s); + u1.sub_shifted(v1, s); + nu.add_shifted(nv, s << 1); + nu.sub_shifted(&sp, s + 1); + sp.sub_shifted(nv, s); + } else { + u0.add_shifted(v0, s); + u1.add_shifted(v1, s); + nu.add_shifted(nv, s << 1); + nu.add_shifted(&sp, s + 1); + sp.add_shifted(nv, s); + } + } + } + + /// Compare this scalar with zero. Returned value is 0xFFFFFFFFFFFFFFFF + /// if this scalar is zero, or 0 otherwise. + pub fn iszero(self) -> u64 { + let x = self.0[0] | self.0[1] | self.0[2] | self.0[3] | self.0[4]; + ((x | x.wrapping_neg()) >> 63).wrapping_sub(1) + } + + /// Compare this scalar with another one. Returned value is + /// 0xFFFFFFFFFFFFFFFF if they are equal, or 0 otherwise. + /// Equality is defined modulo n. + pub fn equals(self, rhs: Self) -> u64 { + let x = (self.0[0] ^ rhs.0[0]) + | (self.0[1] ^ rhs.0[1]) + | (self.0[2] ^ rhs.0[2]) + | (self.0[3] ^ rhs.0[3]) + | (self.0[4] ^ rhs.0[4]); + ((x | x.wrapping_neg()) >> 63).wrapping_sub(1) + } + + pub fn to_hex_string(&self) -> String { + let u64_array = self.0; + + let mut buf: [u8; 40] = [0; 40]; + let dst_ptr = buf.as_mut_ptr(); + + let mut offset = 0; + for e in u64_array { + let bytes = e.to_le_bytes(); + unsafe { + let src_ptr = bytes.as_ptr(); + std::ptr::copy_nonoverlapping(src_ptr, dst_ptr.add(offset), 8); + offset = offset + 8; + } + } + + let hex_string = hex::encode(&buf); + hex_string + } + + pub fn from_hex_string(input_hex_string: &str) -> Self { + let buf: Vec = hex::decode(input_hex_string).unwrap(); + let mut data: [u64; 5] = [0; 5]; + + let src_ptr = buf.as_ptr(); + let mut offset = 0; + for ele in data.iter_mut() { + unsafe { + let mut v_buf: [u8; 8] = [0; 8]; + std::ptr::copy_nonoverlapping(src_ptr.add(offset), v_buf.as_mut_ptr(), 8); + let v: u64 = u64::from_le_bytes(v_buf); + *ele = v; + } + offset = offset + 8; + } + + Self(data) + } +} + +/// A custom 161-bit integer type; used for splitting a scalar into a +/// fraction. Negative values use two's complement notation; the value +/// is truncated to 161 bits (upper bits in the top limb are ignored). +/// Elements are mutable containers. +/// WARNING: everything in here is vartime; do not use on secret values. 
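+/// Internally the value is stored over three 64-bit limbs in little-endian
+/// order (least-significant limb first).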
+#[derive(Clone, Copy, Debug)] +pub struct Signed161([u64; 3]); + +impl Signed161 { + fn from_scalar(s: Scalar) -> Self { + Self([s.0[0], s.0[1], s.0[2]]) + } + + /// Convert that value into a scalar (integer modulo n). + pub fn to_scalar_vartime(self) -> Scalar { + let mut tmp = self.to_u192(); + let neg = (tmp[2] >> 63) != 0; + if neg { + tmp[0] = (!tmp[0]).wrapping_add(1); + let mut cc = tmp[0] == 0; + tmp[1] = !tmp[1]; + if cc { + tmp[1] = tmp[1].wrapping_add(1); + cc = tmp[1] == 0; + } + tmp[2] = !tmp[2]; + if cc { + tmp[2] = tmp[2].wrapping_add(1); + } + return -Scalar([tmp[0], tmp[1], tmp[2], 0, 0]); + } else { + return Scalar([tmp[0], tmp[1], tmp[2], 0, 0]); + } + } + + /// Export this value as a 192-bit integer (three 64-bit limbs, + /// in little-endian order). + pub fn to_u192(self) -> [u64; 3] { + let mut x = self.0[2]; + x &= 0x00000001FFFFFFFF; + x |= (x >> 32).wrapping_neg() << 33; + [self.0[0], self.0[1], x] + } + + // Recode this integer into 33 signed digits for a 5-bit window. + pub(crate) fn recode_signed_5(self) -> [i32; 33] { + // We first sign-extend the value to 192 bits, then add + // 2^160 to get a nonnegative value in the 0 to 2^161-1 + // range. We then recode that value; and finally we fix + // the result by subtracting 1 from the top digit. + let mut tmp = self.to_u192(); + tmp[2] = tmp[2].wrapping_add(0x0000000100000000); + let mut ss = [0i32; 33]; + Scalar::recode_signed_from_limbs(&tmp, &mut ss, 5); + ss[32] -= 1; + ss + } + + // Add v*2^s to this value. + fn add_shifted(&mut self, v: &Signed161, s: i32) { + if s == 0 { + Self::add(self, &v.0[..]); + } else if s < 64 { + Self::add_shifted_small(self, &v.0[..], s); + } else if s < 161 { + Self::add_shifted_small(self, &v.0[((s >> 6) as usize)..], s & 63); + } + } + + fn add_shifted_small(&mut self, v: &[u64], s: i32) { + let mut cc = 0u64; + let j = 3 - v.len(); + let mut vbits = 0u64; + for i in j..3 { + let vw = v[i - j]; + let vws = vw.wrapping_shl(s as u32) | vbits; + vbits = vw.wrapping_shr((64 - s) as u32); + let z = (self.0[i] as u128) + (vws as u128) + (cc as u128); + self.0[i] = z as u64; + cc = (z >> 64) as u64; + } + } + + fn add(&mut self, v: &[u64]) { + let mut cc = 0; + let j = 3 - v.len(); + for i in j..3 { + let z = (self.0[i] as u128) + (v[i - j] as u128) + (cc as u128); + self.0[i] = z as u64; + cc = (z >> 64) as u64; + } + } + + // Subtract v*2^s from this value. + fn sub_shifted(&mut self, v: &Signed161, s: i32) { + if s == 0 { + Self::sub(self, &v.0[..]); + } else if s < 64 { + Self::sub_shifted_small(self, &v.0[..], s); + } else if s < 161 { + Self::sub_shifted_small(self, &v.0[((s >> 6) as usize)..], s & 63); + } + } + + fn sub_shifted_small(&mut self, v: &[u64], s: i32) { + let mut cc = 0u64; + let j = 3 - v.len(); + let mut vbits = 0u64; + for i in j..3 { + let vw = v[i - j]; + let vws = vw.wrapping_shl(s as u32) | vbits; + vbits = vw.wrapping_shr((64 - s) as u32); + let z = (self.0[i] as u128).wrapping_sub(vws as u128).wrapping_sub(cc as u128); + self.0[i] = z as u64; + cc = ((z >> 64) as u64) & 1; + } + } + + fn sub(&mut self, v: &[u64]) { + let mut cc = 0; + let j = 3 - v.len(); + for i in j..3 { + let z = (self.0[i] as u128).wrapping_sub(v[i - j] as u128).wrapping_sub(cc as u128); + self.0[i] = z as u64; + cc = ((z >> 64) as u64) & 1; + } + } +} + +// A custom 640-bit integer type (signed). +// Elements are mutable containers. +// WARNING: everything in here is vartime; do not use on secret values. 
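+// Stored over ten 64-bit little-endian limbs; wide enough to hold n^2 and the
+// intermediate quantities used by Lagrange's algorithm.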
+#[derive(Clone, Copy, Debug)] +struct Signed640([u64; 10]); + +impl Signed640 { + // Obtain an instance containing n^2. + fn from_nsquared() -> Self { + Signed640([ + 0x8E6B7A18061803C1, + 0x0AD8BDEE1594E2CF, + 0x17640E465F2598BC, + 0x90465B4214B27B1C, + 0xD308FECCB1878B88, + 0x3CC55EB2EAC07502, + 0x59F038FB784335CE, + 0xBFFFFE954FB808EA, + 0xBFFFFFCB80000099, + 0x3FFFFFFD8000000D, + ]) + } + + // Obtain an instance containing a*b (both a and b are interpreted + // as integers in the 0..n-1 range). + fn from_mul_scalars(a: Scalar, b: Scalar) -> Self { + let mut r = Signed640([0u64; 10]); + for i in 0..5 { + let aw = a.0[i]; + let mut cc = 0u64; + for j in 0..5 { + let bw = b.0[j]; + let z = ((aw as u128) * (bw as u128)) + .wrapping_add(r.0[i + j] as u128) + .wrapping_add(cc as u128); + r.0[i + j] = z as u64; + cc = (z >> 64) as u64; + } + r.0[i + 5] = cc; + } + r + } + + // Add 1 to this instance. + fn add1(&mut self) { + for i in 0..10 { + self.0[i] = self.0[i].wrapping_add(1); + if self.0[i] != 0 { + return; + } + } + } + + fn is_nonnegative(&self) -> bool { + (self.0[9] >> 63) == 0 + } + + fn lt_unsigned(&self, rhs: &Self) -> bool { + for i in (0..10).rev() { + let aw = self.0[i]; + let bw = rhs.0[i]; + if aw < bw { + return true; + } + if aw > bw { + return false; + } + } + false + } + + // Get the bit length of this value. The bit length is defined as the + // minimal size of the binary representation in two's complement, + // _excluding_ the sign bit (thus, -2^k has bit length k, whereas +2^k + // has bit length k+1). + fn bitlength(&self) -> i32 { + let sm = (self.0[9] >> 63).wrapping_neg(); + for i in (0..10).rev() { + let w = self.0[i] ^ sm; + if w != 0 { + return ((i as i32) << 6) + Self::u64_bitlength(w); + } + } + 0 + } + + fn u64_bitlength(w: u64) -> i32 { + // We use here a portable algorithm; some architectures have + // dedicated opcodes that could speed up this operation + // greatly (e.g. lzcnt on recent x86). + let mut x = w; + let mut r = 0; + if x > 0xFFFFFFFF { + x >>= 32; + r += 32; + } + if x > 0x0000FFFF { + x >>= 16; + r += 16; + } + if x > 0x000000FF { + x >>= 8; + r += 8; + } + if x > 0x0000000F { + x >>= 4; + r += 4; + } + if x > 0x00000003 { + x >>= 2; + r += 2; + } + r + (x as i32) - (((x + 1) >> 2) as i32) + } + + // Add v*2^s to this instance. + fn add_shifted(&mut self, v: &Signed640, s: i32) { + if s == 0 { + Self::add(self, &v.0[..]); + } else if s < 64 { + Self::add_shifted_small(self, &v.0[..], s); + } else if s < 640 { + Self::add_shifted_small(self, &v.0[((s >> 6) as usize)..], s & 63); + } + } + + fn add_shifted_small(&mut self, v: &[u64], s: i32) { + let mut cc = 0u64; + let j = 10 - v.len(); + let mut vbits = 0u64; + for i in j..10 { + let vw = v[i - j]; + let vws = vw.wrapping_shl(s as u32) | vbits; + vbits = vw.wrapping_shr((64 - s) as u32); + let z = (self.0[i] as u128) + (vws as u128) + (cc as u128); + self.0[i] = z as u64; + cc = (z >> 64) as u64; + } + } + + fn add(&mut self, v: &[u64]) { + let mut cc = 0; + let j = 10 - v.len(); + for i in j..10 { + let z = (self.0[i] as u128) + (v[i - j] as u128) + (cc as u128); + self.0[i] = z as u64; + cc = (z >> 64) as u64; + } + } + + // Subtract v*2^s from this instance. 
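+    // As in add_shifted above, the shift amount is split into a whole-limb
+    // part (s >> 6) and an in-limb part (s & 63).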
+ fn sub_shifted(&mut self, v: &Signed640, s: i32) { + if s == 0 { + Self::sub(self, &v.0[..]); + } else if s < 64 { + Self::sub_shifted_small(self, &v.0[..], s); + } else { + Self::sub_shifted_small(self, &v.0[((s >> 6) as usize)..], s & 63); + } + } + + fn sub_shifted_small(&mut self, v: &[u64], s: i32) { + let mut cc = 0u64; + let j = 10 - v.len(); + let mut vbits = 0u64; + for i in j..10 { + let vw = v[i - j]; + let vws = vw.wrapping_shl(s as u32) | vbits; + vbits = vw.wrapping_shr((64 - s) as u32); + let z = (self.0[i] as u128).wrapping_sub(vws as u128).wrapping_sub(cc as u128); + self.0[i] = z as u64; + cc = ((z >> 64) as u64) & 1; + } + } + + fn sub(&mut self, v: &[u64]) { + let mut cc = 0; + let j = 10 - v.len(); + for i in j..10 { + let z = (self.0[i] as u128).wrapping_sub(v[i - j] as u128).wrapping_sub(cc as u128); + self.0[i] = z as u64; + cc = ((z >> 64) as u64) & 1; + } + } +} + +// ======================================================================== +// Unit tests. + +#[cfg(test)] +mod tests { + use crate::test_field_arithmetic; + + use super::Scalar; + use rand::{thread_rng, Rng}; + + #[test] + fn test_scalar_ops() { + let buf1: [u8; 50] = [ + 0xE0, 0xFF, 0x8B, 0x94, 0x96, 0xD9, 0x0F, 0xE8, 0x9C, 0xA0, 0x24, 0xD7, 0x39, 0x5C, + 0x88, 0xE8, 0x39, 0x06, 0xB8, 0xCF, 0xE6, 0xFF, 0xFF, 0x7F, 0x16, 0x00, 0x00, 0x00, + 0xF1, 0xFF, 0xFF, 0x7F, 0x07, 0x00, 0x00, 0x80, 0xFD, 0xFF, 0xFF, 0x7F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + let buf2: [u8; 50] = [ + 0xE1, 0xFF, 0x8B, 0x94, 0x96, 0xD9, 0x0F, 0xE8, 0x9C, 0xA0, 0x24, 0xD7, 0x39, 0x5C, + 0x88, 0xE8, 0x39, 0x06, 0xB8, 0xCF, 0xE6, 0xFF, 0xFF, 0x7F, 0x16, 0x00, 0x00, 0x00, + 0xF1, 0xFF, 0xFF, 0x7F, 0x07, 0x00, 0x00, 0x80, 0xFD, 0xFF, 0xFF, 0x7F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + let buf3: [u8; 50] = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + ]; + for i in 0..51 { + let (s1, c1) = Scalar::try_from_noncanonical_bytes(&buf1[..i]); + let (s2, c2) = Scalar::try_from_noncanonical_bytes(&buf2[..i]); + let (s3, c3) = Scalar::try_from_noncanonical_bytes(&buf3[..i]); + assert!(c1 == 0xFFFFFFFFFFFFFFFF); + if i <= 40 { + assert!(s1.encode()[..i] == buf1[..i]); + } else { + assert!(s1.encode()[..] == buf1[..40]); + } + if i <= 39 { + assert!(c2 == 0xFFFFFFFFFFFFFFFF); + assert!(s2.encode()[..i] == buf2[..i]); + } else { + assert!(c2 == 0); + } + if i <= 47 { + assert!(c3 == 0xFFFFFFFFFFFFFFFF); + if i <= 40 { + assert!(s3.encode()[..i] == buf3[..i]); + } else { + assert!(s3.encode()[..] 
== buf3[..40]); + } + } else { + assert!(c3 == 0); + } + } + + // buf4 = a randomly chosen 512-bit integer + let buf4: [u8; 64] = [ + 0xB5, 0xDD, 0x28, 0xB8, 0xD2, 0x9B, 0x6F, 0xF8, 0x15, 0x65, 0x3F, 0x89, 0xDB, 0x7B, + 0xA9, 0xDE, 0x33, 0x7D, 0xA8, 0x27, 0x82, 0x26, 0xB4, 0xD6, 0x9E, 0x1F, 0xFA, 0x97, + 0x3D, 0x9E, 0x01, 0x9C, 0x77, 0xC9, 0x63, 0x5C, 0xB8, 0x34, 0xD8, 0x1A, 0x4D, 0xCB, + 0x03, 0x48, 0x62, 0xCD, 0xEE, 0xC9, 0x8E, 0xC8, 0xC9, 0xA7, 0xB3, 0x6E, 0xDA, 0xCE, + 0x18, 0x75, 0x1B, 0xDD, 0x4F, 0x94, 0x67, 0xB5, + ]; + // buf5 = buf4 mod n + let buf5: [u8; 40] = [ + 0x89, 0x01, 0x7A, 0x52, 0xBD, 0xDF, 0x45, 0x60, 0xCE, 0x5B, 0xBA, 0xE5, 0x5D, 0x25, + 0x96, 0x5A, 0x0A, 0x4F, 0x0A, 0x27, 0x1A, 0x7A, 0xE8, 0x1D, 0x7D, 0xBF, 0xE3, 0xE3, + 0xFA, 0x5E, 0x17, 0xE0, 0x44, 0xD9, 0xA5, 0x37, 0x9B, 0xF8, 0x38, 0x74, + ]; + let s4 = Scalar::from_noncanonical_bytes(&buf4[..]); + assert!(s4.encode() == buf5); + let (s5, c5) = Scalar::try_from_noncanonical_bytes(&buf5[..]); + assert!(c5 == 0xFFFFFFFFFFFFFFFF); + assert!(s5.encode() == buf5); + { + let should_be_some = Scalar::from_canonical_bytes(buf5); + assert!(should_be_some.is_some()); + assert!(should_be_some.unwrap() == s5); + } + + // buf6 = (buf4^256) mod n + let buf6: [u8; 40] = [ + 0x27, 0x7E, 0x2C, 0xAB, 0x6D, 0xAD, 0x8D, 0xA0, 0x15, 0x44, 0x02, 0x0F, 0xFA, 0xD5, + 0x4F, 0x15, 0xBF, 0x6D, 0x1D, 0x76, 0x22, 0x73, 0xCD, 0xDA, 0x23, 0xFE, 0x5A, 0xED, + 0xCA, 0x75, 0xD7, 0x04, 0x05, 0x66, 0x87, 0x3D, 0x37, 0x5B, 0x24, 0x13, + ]; + let mut s6 = s4; + for _ in 0..8 { + s6 *= s6; + } + assert!(s6.encode() == buf6); + + // buf6 recoded in signed integers, w = 4 + let ref4: [i32; 80] = [ + 7, 2, -2, 8, -4, 3, -5, -5, -2, 7, -3, -5, -2, -7, 1, -6, 6, 1, 4, 4, 2, 0, -1, 1, -6, + 0, 6, -3, 0, 5, 5, 1, -1, -4, -2, 7, -3, 2, 6, 7, 2, 2, 3, 7, -3, -3, -5, -2, 4, 2, -2, + 0, -5, 6, -3, -1, -5, -3, 6, 7, 7, -3, 5, 0, 5, 0, 6, 6, 7, 8, -3, 4, 7, 3, -5, 6, 4, + 2, 3, 1, + ]; + // buf6 recoded in signed integers, w = 5 + let ref5: [i32; 64] = [ + 7, -15, 0, -7, -13, -10, -9, 14, 13, 13, 3, 1, -6, 11, 16, 8, 2, -8, 4, -12, 0, 11, -1, + 10, -11, -7, 16, -5, -9, 15, -8, 15, 2, -7, -3, -5, 13, 13, 15, 4, -2, -8, -9, -5, 15, + 5, -9, 15, -9, 7, 1, 10, 0, -13, -2, -15, -2, -6, 14, -10, 6, -14, 13, 2, + ]; + + let mut ss4 = [0i32; 80]; + s6.recode_signed(&mut ss4[..], 4); + assert!(ss4 == ref4); + let mut ss5 = [0i32; 64]; + s6.recode_signed(&mut ss5[..], 5); + assert!(ss5 == ref5); + } + + #[test] + fn test_lagrange() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut sbuf = [0u8; 48]; + rng.fill(&mut sbuf); + let s = Scalar::from_noncanonical_bytes(&mut sbuf); + let (v0, v1) = s.lagrange(); + let c0 = v0.to_scalar_vartime(); + let c1 = v1.to_scalar_vartime(); + assert!((c1 * s - c0).iszero() == 0xFFFFFFFFFFFFFFFF); + } + } + + #[test] + fn test_convert_hex_str() { + // buf4 = a randomly chosen 512-bit integer + let buf4: [u8; 64] = [ + 0xB5, 0xDD, 0x28, 0xB8, 0xD2, 0x9B, 0x6F, 0xF8, 0x15, 0x65, 0x3F, 0x89, 0xDB, 0x7B, + 0xA9, 0xDE, 0x33, 0x7D, 0xA8, 0x27, 0x82, 0x26, 0xB4, 0xD6, 0x9E, 0x1F, 0xFA, 0x97, + 0x3D, 0x9E, 0x01, 0x9C, 0x77, 0xC9, 0x63, 0x5C, 0xB8, 0x34, 0xD8, 0x1A, 0x4D, 0xCB, + 0x03, 0x48, 0x62, 0xCD, 0xEE, 0xC9, 0x8E, 0xC8, 0xC9, 0xA7, 0xB3, 0x6E, 0xDA, 0xCE, + 0x18, 0x75, 0x1B, 0xDD, 0x4F, 0x94, 0x67, 0xB5, + ]; + + let s4 = Scalar::from_noncanonical_bytes(&buf4[..]); + let hex_str = s4.to_hex_string(); + let recoverred = Scalar::from_hex_string(&hex_str); + assert_eq!(s4, recoverred); + } + + 
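+    // Also exercise the generic `test_field_arithmetic!` checks against this
+    // Scalar type.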
test_field_arithmetic!(crate::curve::scalar_field::Scalar); +} diff --git a/ecgfp5/src/curve/test_utils.rs b/ecgfp5/src/curve/test_utils.rs new file mode 100644 index 0000000000..1b3758696e --- /dev/null +++ b/ecgfp5/src/curve/test_utils.rs @@ -0,0 +1,26 @@ +use super::{ + base_field::{Sgn0, SquareRoot}, + GFp, GFp5, +}; +use plonky2_field::{extension::quintic::QuinticExtension, types::Sample}; +use rand::thread_rng; + +pub fn gfp5_random_non_square() -> GFp5 { + let mut rng = thread_rng(); + loop { + let attempt = QuinticExtension::::sample(&mut rng); + if let None = attempt.sqrt() { + return attempt; + } + } +} + +pub fn gfp5_random_sgn0_eq_0() -> GFp5 { + let mut rng = thread_rng(); + loop { + let attempt = QuinticExtension::::sample(&mut rng); + if false == attempt.sgn0() { + return attempt; + } + } +} diff --git a/ecgfp5/src/gadgets/base_field.rs b/ecgfp5/src/gadgets/base_field.rs new file mode 100644 index 0000000000..c7008db37f --- /dev/null +++ b/ecgfp5/src/gadgets/base_field.rs @@ -0,0 +1,1219 @@ +use alloc::vec::Vec; +use plonky2_ecdsa::gadgets::nonnative::NonNativeTarget; +use plonky2_field::{extension::quintic::QuinticExtension, goldilocks_field::GoldilocksField}; + +use plonky2::{ + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::{BoolTarget, Target}, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::circuit_builder::CircuitBuilder, +}; +use plonky2_ecdsa::gadgets::{biguint::BigUintTarget, nonnative::CircuitBuilderNonNative}; +use plonky2_field::{ + extension::{Extendable, FieldExtension}, + types::Field, +}; +use plonky2_u32::gadgets::arithmetic_u32::U32Target; + +use crate::{ + curve::{base_field::SquareRoot, scalar_field::Scalar, GFp, GFp5}, + gates::gfp5_mul::MulGFp5Gate, +}; + +const THREE: GFp = GoldilocksField(3); + +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[repr(transparent)] +pub struct QuinticExtensionTarget(pub [Target; 5]); + +impl QuinticExtensionTarget { + pub fn new(limbs: [Target; 5]) -> Self { + Self(limbs) + } + + pub fn to_target_array(&self) -> [Target; 5] { + self.0 + } +} + +pub trait CircuitBuilderGFp5> { + fn add_virtual_quintic_ext_target(&mut self) -> QuinticExtensionTarget; + fn connect_quintic_ext(&mut self, a: QuinticExtensionTarget, b: QuinticExtensionTarget); + fn register_quintic_ext_public_input(&mut self, a: QuinticExtensionTarget); + + fn zero_quintic_ext(&mut self) -> QuinticExtensionTarget; + fn one_quintic_ext(&mut self) -> QuinticExtensionTarget; + fn constant_quintic_ext(&mut self, c: GFp5) -> QuinticExtensionTarget; + fn select_quintic_ext( + &mut self, + cond: BoolTarget, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn random_access_quintic_ext( + &mut self, + access_index: Target, + v: &[QuinticExtensionTarget], + ) -> QuinticExtensionTarget; + fn is_equal_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> BoolTarget; + + fn double_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget; + fn triple_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget; + + fn neg_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget; + fn add_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn add_const_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + c: GFp5, + ) -> QuinticExtensionTarget; + fn sub_quintic_ext( + &mut self, + a: 
QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn weighted_mul_quintic_ext( + &mut self, + c: GFp, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn mul_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn mul_const_quintic_ext( + &mut self, + c: GFp5, + a: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + + fn div_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn div_const_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + c: GFp5, + ) -> QuinticExtensionTarget; + fn div_or_zero_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget; + fn inverse_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget; + + fn any_sqrt_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget; + fn try_any_sqrt_quintic_ext( + &mut self, + x: QuinticExtensionTarget, + ) -> (QuinticExtensionTarget, BoolTarget); + fn try_canonical_sqrt_quintic_ext( + &mut self, + x: QuinticExtensionTarget, + ) -> (QuinticExtensionTarget, BoolTarget); + fn canonical_sqrt_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget; + + fn sgn0_quintic_ext(&mut self, x: QuinticExtensionTarget) -> BoolTarget; + fn legendre_sym_quintic_ext(&mut self, x: QuinticExtensionTarget) -> Target; + fn frob_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget; + fn frob2_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget; + + fn square_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget; + fn add_many_quintic_ext( + &mut self, + terms: Vec, + ) -> QuinticExtensionTarget; + fn dot_product_quintic_ext( + &mut self, + a: Vec, + b: Vec, + ) -> QuinticExtensionTarget; + + fn encode_quintic_ext_as_scalar( + &mut self, + x: QuinticExtensionTarget, + ) -> NonNativeTarget; +} + +pub trait PartialWitnessQuinticExt>: Witness { + fn get_quintic_ext_target(&self, target: QuinticExtensionTarget) -> QuinticExtension; + + fn get_quintic_ext_targets( + &self, + targets: &[QuinticExtensionTarget], + ) -> Vec> { + targets.iter().map(|&t| self.get_quintic_ext_target(t)).collect() + } + + fn set_quintic_ext_target( + &mut self, + target: QuinticExtensionTarget, + value: QuinticExtension, + ); + + fn set_quintic_ext_targets( + &mut self, + targets: &[QuinticExtensionTarget], + values: &[QuinticExtension], + ) { + for (&t, &v) in targets.iter().zip(values.iter()) { + self.set_quintic_ext_target(t, v); + } + } +} + +impl, F: RichField + Extendable<5>> PartialWitnessQuinticExt for W { + fn get_quintic_ext_target(&self, target: QuinticExtensionTarget) -> QuinticExtension { + let QuinticExtensionTarget([t0, t1, t2, t3, t4]) = target; + QuinticExtension([ + self.get_target(t0), + self.get_target(t1), + self.get_target(t2), + self.get_target(t3), + self.get_target(t4), + ]) + } + + fn set_quintic_ext_target( + &mut self, + target: QuinticExtensionTarget, + value: QuinticExtension, + ) { + let QuinticExtensionTarget([t0, t1, t2, t3, t4]) = target; + let [v0, v1, v2, v3, v4] = value.0; + + self.set_target(t0, v0); + self.set_target(t1, v1); + self.set_target(t2, v2); + self.set_target(t3, v3); + self.set_target(t4, v4); + } +} + +// macro_rules! 
impl_circuit_builder_for_extension_degree { +// ($degree:literal) => { +impl CircuitBuilderGFp5 for CircuitBuilder { + fn add_virtual_quintic_ext_target(&mut self) -> QuinticExtensionTarget { + QuinticExtensionTarget::new([ + self.add_virtual_target(), + self.add_virtual_target(), + self.add_virtual_target(), + self.add_virtual_target(), + self.add_virtual_target(), + ]) + } + + fn connect_quintic_ext(&mut self, a: QuinticExtensionTarget, b: QuinticExtensionTarget) { + for (lhs, rhs) in a.to_target_array().into_iter().zip(b.to_target_array().into_iter()) { + self.connect(lhs, rhs); + } + } + + fn register_quintic_ext_public_input(&mut self, a: QuinticExtensionTarget) { + for t in a.to_target_array().into_iter() { + self.register_public_input(t); + } + } + + fn zero_quintic_ext(&mut self) -> QuinticExtensionTarget { + QuinticExtensionTarget::new([self.zero(); 5]) + } + + fn one_quintic_ext(&mut self) -> QuinticExtensionTarget { + QuinticExtensionTarget::new([ + self.one(), + self.zero(), + self.zero(), + self.zero(), + self.zero(), + ]) + } + + fn constant_quintic_ext(&mut self, c: GFp5) -> QuinticExtensionTarget { + let QuinticExtension([c0, c1, c2, c3, c4]) = c; + QuinticExtensionTarget::new([ + self.constant(c0), + self.constant(c1), + self.constant(c2), + self.constant(c3), + self.constant(c4), + ]) + } + + fn select_quintic_ext( + &mut self, + cond: BoolTarget, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + let QuinticExtensionTarget([b0, b1, b2, b3, b4]) = b; + QuinticExtensionTarget::new([ + self.select(cond, a0, b0), + self.select(cond, a1, b1), + self.select(cond, a2, b2), + self.select(cond, a3, b3), + self.select(cond, a4, b4), + ]) + } + + fn random_access_quintic_ext( + &mut self, + access_index: Target, + v: &[QuinticExtensionTarget], + ) -> QuinticExtensionTarget { + let mut a0s = Vec::new(); + let mut a1s = Vec::new(); + let mut a2s = Vec::new(); + let mut a3s = Vec::new(); + let mut a4s = Vec::new(); + for &QuinticExtensionTarget([a0, a1, a2, a3, a4]) in v { + a0s.push(a0); + a1s.push(a1); + a2s.push(a2); + a3s.push(a3); + a4s.push(a4); + } + + QuinticExtensionTarget([ + self.random_access(access_index, a0s), + self.random_access(access_index, a1s), + self.random_access(access_index, a2s), + self.random_access(access_index, a3s), + self.random_access(access_index, a4s), + ]) + } + + fn is_equal_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> BoolTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + let QuinticExtensionTarget([b0, b1, b2, b3, b4]) = b; + + let terms = vec![ + self.is_equal(a0, b0).target, + self.is_equal(a1, b1).target, + self.is_equal(a2, b2).target, + self.is_equal(a3, b3).target, + self.is_equal(a4, b4).target, + ]; + + let prod = self.mul_many(terms); + BoolTarget::new_unsafe(prod) + } + + fn neg_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + QuinticExtensionTarget::new([ + self.neg(a0), + self.neg(a1), + self.neg(a2), + self.neg(a3), + self.neg(a4), + ]) + } + + fn double_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + QuinticExtensionTarget::new([ + self.mul_const(GFp::TWO, a0), + self.mul_const(GFp::TWO, a1), + self.mul_const(GFp::TWO, a2), + self.mul_const(GFp::TWO, a3), + self.mul_const(GFp::TWO, a4), + ]) + } + + fn 
triple_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + QuinticExtensionTarget::new([ + self.mul_const(THREE, a0), + self.mul_const(THREE, a1), + self.mul_const(THREE, a2), + self.mul_const(THREE, a3), + self.mul_const(THREE, a4), + ]) + } + + fn add_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + let QuinticExtensionTarget([b0, b1, b2, b3, b4]) = b; + QuinticExtensionTarget::new([ + self.add(a0, b0), + self.add(a1, b1), + self.add(a2, b2), + self.add(a3, b3), + self.add(a4, b4), + ]) + } + + fn add_const_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + c: GFp5, + ) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + let QuinticExtension([c0, c1, c2, c3, c4]) = c; + QuinticExtensionTarget::new([ + self.add_const(a0, c0), + self.add_const(a1, c1), + self.add_const(a2, c2), + self.add_const(a3, c3), + self.add_const(a4, c4), + ]) + } + + fn sub_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + let QuinticExtensionTarget([a0, a1, a2, a3, a4]) = a; + let QuinticExtensionTarget([b0, b1, b2, b3, b4]) = b; + QuinticExtensionTarget::new([ + self.sub(a0, b0), + self.sub(a1, b1), + self.sub(a2, b2), + self.sub(a3, b3), + self.sub(a4, b4), + ]) + } + + fn weighted_mul_quintic_ext( + &mut self, + c: GFp, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + // TODO: check for special cases + // TODO: check to see if we've done the same operation already + let gate = MulGFp5Gate::new_from_config(&self.config); + let constants = &[c]; + let (gate, i) = self.find_slot(gate, constants, constants); + + let multiplicand_0_wires = + MulGFp5Gate::wires_ith_multiplicand_0(i).map(|wire| Target::wire(gate, wire)); + let multiplicand_1_wires = + MulGFp5Gate::wires_ith_multiplicand_1(i).map(|wire| Target::wire(gate, wire)); + + a.0.into_iter().zip(multiplicand_0_wires).for_each(|(a, wire)| self.connect(a, wire)); + b.0.into_iter().zip(multiplicand_1_wires).for_each(|(b, wire)| self.connect(b, wire)); + + let output_limbs: [Target; 5] = MulGFp5Gate::wires_ith_output(i) + .map(|wire| Target::wire(gate, wire)) + .collect::>() + .try_into() + .unwrap(); + QuinticExtensionTarget::new(output_limbs) + } + + fn mul_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + self.weighted_mul_quintic_ext(GFp::ONE, a, b) + } + + fn mul_const_quintic_ext( + &mut self, + c: GFp5, + a: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + let c = self.constant_quintic_ext(c); + self.weighted_mul_quintic_ext(GFp::ONE, c, a) + } + + fn div_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + let quotient = self.add_virtual_quintic_ext_target(); + self.add_simple_generator(QuinticQuotientGenerator::new(a, b, quotient)); + + let quotient_times_denominator = self.mul_quintic_ext(quotient, b); + self.connect_quintic_ext(quotient_times_denominator, a); + + quotient + } + + fn div_or_zero_quintic_ext( + &mut self, + a: QuinticExtensionTarget, + b: QuinticExtensionTarget, + ) -> QuinticExtensionTarget { + let quotient = self.add_virtual_quintic_ext_target(); + self.add_simple_generator(QuinticQuotientGenerator::new(a, b, quotient)); + + let 
quotient_times_denominator = self.mul_quintic_ext(quotient, b); + let zero_if_prod_is_a = self.sub_quintic_ext(quotient_times_denominator, a); + + // check zero + // we can do the multiplication limb-wise here, as their product is zero + // iff one of them is all zeros + let QuinticExtensionTarget([b0, b1, b2, b3, b4]) = b; + let QuinticExtensionTarget([p0, p1, p2, p3, p4]) = zero_if_prod_is_a; + let z0 = self.mul(b0, p0); + let z1 = self.mul(b1, p1); + let z2 = self.mul(b2, p2); + let z3 = self.mul(b3, p3); + let z4 = self.mul(b4, p4); + self.assert_zero(z0); + self.assert_zero(z1); + self.assert_zero(z2); + self.assert_zero(z3); + self.assert_zero(z4); + + quotient + } + + fn div_const_quintic_ext( + &mut self, + num: QuinticExtensionTarget, + denom: GFp5, + ) -> QuinticExtensionTarget { + let denom = self.constant_quintic_ext(denom); + self.div_quintic_ext(num, denom) + } + + fn inverse_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget { + let one = self.one_quintic_ext(); + + let inverse = self.add_virtual_quintic_ext_target(); + self.add_simple_generator(QuinticQuotientGenerator::new(one, x, inverse)); + + let should_be_one = self.mul_quintic_ext(inverse, x); + self.connect_quintic_ext(should_be_one, one); + + inverse + } + + fn any_sqrt_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget { + let (root_x, _) = self.try_any_sqrt_quintic_ext(x); + root_x + } + + fn try_any_sqrt_quintic_ext( + &mut self, + x: QuinticExtensionTarget, + ) -> (QuinticExtensionTarget, BoolTarget) { + let zero = self.zero_quintic_ext(); + let root_x = self.add_virtual_quintic_ext_target(); + let is_sqrt = self.add_virtual_bool_target_unsafe(); + self.add_simple_generator(QuinticSqrtGenerator::new(x, root_x, is_sqrt)); + + let should_be_x_or_zero = self.square_quintic_ext(root_x); + let x_or_zero = self.select_quintic_ext(is_sqrt, x, zero); + self.connect_quintic_ext(should_be_x_or_zero, x_or_zero); + + (root_x, is_sqrt) + } + + /// returns true or false indicating a notion of "sign" for quintic_ext. 
+ /// This is used to canonicalize the square root + /// This is an implementation of the function sgn0 from the IRTF's hash-to-curve document + /// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-07#name-the-sgn0-function + fn sgn0_quintic_ext(&mut self, x: QuinticExtensionTarget) -> BoolTarget { + let one = self.one(); + let zero = self.zero(); + + let mut sign = self.constant_bool(false); + let mut is_zero = self.constant_bool(true); + for limb in x.to_target_array() { + let bit_decomp = self.split_le_base::<2>(limb, 64); + + // sign_i = x_i mod 2 + // is_zero_i = x_i == 0 + // SAFETY: targets from bit_decomp guaranteed to contain values of 0 or 1 + let sign_i = BoolTarget::new_unsafe(self.sub(one, bit_decomp[0])); + let is_zero_i = self.is_equal(limb, zero); + + // sign = sign || (is_zero && sign_i) + // is_zero = is_zero && is_zero_i + + // x or y = x + y - xy + let is_zero_and_sign_i = self.and(is_zero_i, sign_i); + let sign_and_is_zero_and_sign_i = self.and(sign, is_zero_and_sign_i); + let tmp = + self.mul_const_add(-GFp::ONE, sign_and_is_zero_and_sign_i.target, sign.target); + sign = BoolTarget::new_unsafe(self.add(tmp, is_zero_and_sign_i.target)); + is_zero = self.and(is_zero, is_zero_i); + } + + sign + } + + fn legendre_sym_quintic_ext(&mut self, x: QuinticExtensionTarget) -> Target { + // compute x^r where r = p^4 + p^3 + p^2 + p + 1 + let frob1 = self.frob_quintic_ext(x); + let frob2 = self.frob2_quintic_ext(x); + let frob1_times_frob2 = self.mul_quintic_ext(frob1, frob2); + let frob2_frob1_times_frob2 = self.frob2_quintic_ext(frob1_times_frob2); + + let x_to_r_minus_1 = self.mul_quintic_ext(frob1_times_frob2, frob2_frob1_times_frob2); + let x_to_r_quintic = self.mul_quintic_ext(x_to_r_minus_1, x); + + // x^r guaranteed to be in base field + let QuinticExtensionTarget([y, _, _, _, _]) = x_to_r_quintic; + + let y31 = self.exp_power_of_2(y, 31); + let y63 = self.exp_power_of_2(y31, 32); + + // TODO upstream an inverse_or_zero gadget + let zero = self.zero(); + let one = self.one(); + let y31_is_zero = self.is_equal(y31, zero); + let denom = self.select(y31_is_zero, one, y31); + let res = self.div(y63, denom); + + self.select(y31_is_zero, zero, res) + } + + fn frob_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget { + let frob_coeff_1 = GFp::from_canonical_u64(1041288259238279555); + let frob_coeff_2 = GFp::from_canonical_u64(15820824984080659046); + let frob_coeff_3 = GFp::from_canonical_u64(211587555138949697); + let frob_coeff_4 = GFp::from_canonical_u64(1373043270956696022); + + let QuinticExtensionTarget([c0, mut c1, mut c2, mut c3, mut c4]) = x; + + c1 = self.mul_const(frob_coeff_1, c1); + c2 = self.mul_const(frob_coeff_2, c2); + c3 = self.mul_const(frob_coeff_3, c3); + c4 = self.mul_const(frob_coeff_4, c4); + + QuinticExtensionTarget([c0, c1, c2, c3, c4]) + } + + fn frob2_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget { + let frob2_coeff_1 = GFp::from_canonical_u64(15820824984080659046); + let frob2_coeff_2 = GFp::from_canonical_u64(1373043270956696022); + let frob2_coeff_3 = GFp::from_canonical_u64(1041288259238279555); + let frob2_coeff_4 = GFp::from_canonical_u64(211587555138949697); + + let QuinticExtensionTarget([c0, mut c1, mut c2, mut c3, mut c4]) = x; + + c1 = self.mul_const(frob2_coeff_1, c1); + c2 = self.mul_const(frob2_coeff_2, c2); + c3 = self.mul_const(frob2_coeff_3, c3); + c4 = self.mul_const(frob2_coeff_4, c4); + + QuinticExtensionTarget([c0, c1, c2, c3, c4]) + } + + // returns the 
sqrt(x) such that `sgn0(sqrt(x)) == false` + fn canonical_sqrt_quintic_ext(&mut self, x: QuinticExtensionTarget) -> QuinticExtensionTarget { + let root_x = self.any_sqrt_quintic_ext(x); + let neg_root_x = self.neg_quintic_ext(root_x); + + let sign = self.sgn0_quintic_ext(root_x); + self.select_quintic_ext(sign, neg_root_x, root_x) + } + + fn try_canonical_sqrt_quintic_ext( + &mut self, + x: QuinticExtensionTarget, + ) -> (QuinticExtensionTarget, BoolTarget) { + let (root_x, is_sqrt) = self.try_any_sqrt_quintic_ext(x); + let neg_root_x = self.neg_quintic_ext(root_x); + + let sign = self.sgn0_quintic_ext(root_x); + let canonical_root_x = self.select_quintic_ext(sign, neg_root_x, root_x); + + (canonical_root_x, is_sqrt) + } + + fn square_quintic_ext(&mut self, a: QuinticExtensionTarget) -> QuinticExtensionTarget { + self.mul_quintic_ext(a, a) + } + + fn add_many_quintic_ext( + &mut self, + terms: Vec, + ) -> QuinticExtensionTarget { + let mut sum = self.zero_quintic_ext(); + for term in terms { + sum = self.add_quintic_ext(sum, term); + } + sum + } + + fn dot_product_quintic_ext( + &mut self, + a: Vec, + b: Vec, + ) -> QuinticExtensionTarget { + let mut terms = Vec::new(); + for (a, b) in a.into_iter().zip(b.into_iter()) { + terms.push(self.mul_quintic_ext(a, b)); + } + self.add_many_quintic_ext(terms) + } + + // TODO optimize + fn encode_quintic_ext_as_scalar( + &mut self, + x: QuinticExtensionTarget, + ) -> NonNativeTarget { + let QuinticExtensionTarget([c0, c1, c2, c3, c4]) = x; + + let bits = [ + self.split_le_base::<2>(c0, 64), + self.split_le_base::<2>(c1, 64), + self.split_le_base::<2>(c2, 64), + self.split_le_base::<2>(c3, 64), + self.split_le_base::<2>(c4, 64), + ] + .concat(); + + let limbs_u32 = bits + .chunks(32) + .map(|chunk| { + let mut terms = vec![]; + for (i, term) in chunk.iter().enumerate() { + terms.push(self.mul_const(GFp::from_canonical_u32(1 << i), *term)); + } + + U32Target(self.add_many(terms)) + }) + .collect::>(); + + let biguint = BigUintTarget { limbs: limbs_u32 }; + self.reduce::(&biguint) + } +} +// }; +// } + +// impl_circuit_builder_for_extension_degree!(1); +// impl_circuit_builder_for_extension_degree!(2); +// impl_circuit_builder_for_extension_degree!(4); +// impl_circuit_builder_for_extension_degree!(5); + +#[derive(Debug, Clone)] +pub struct QuinticQuotientGenerator { + numerator: QuinticExtensionTarget, + denominator: QuinticExtensionTarget, + quotient: QuinticExtensionTarget, +} + +impl QuinticQuotientGenerator { + pub fn new( + numerator: QuinticExtensionTarget, + denominator: QuinticExtensionTarget, + quotient: QuinticExtensionTarget, + ) -> Self { + QuinticQuotientGenerator { numerator, denominator, quotient } + } +} + +impl + Extendable<2>> SimpleGenerator + for QuinticQuotientGenerator +{ + fn id(&self) -> String { + "QuinticQuotientGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + let mut deps = self.numerator.to_target_array().to_vec(); + deps.extend(self.denominator.to_target_array()); + deps + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let numerator_limbs = 
self.numerator.to_target_array().map(|t| witness.get_target(t)); + let numerator = QuinticExtension::::from_basefield_array(numerator_limbs); + + let denominator_limbs = self.denominator.to_target_array().map(|t| witness.get_target(t)); + let denominator = QuinticExtension::::from_basefield_array(denominator_limbs); + + let quotient = if denominator == QuinticExtension::::ZERO { + QuinticExtension::::ZERO + } else { + numerator / denominator + }; + for (lhs, rhs) in self.quotient.to_target_array().into_iter().zip( + as FieldExtension<5>>::to_basefield_array("ient).into_iter(), + ) { + out_buffer.set_target(lhs, rhs); + } + } +} + +#[derive(Debug, Clone)] +pub struct QuinticSqrtGenerator { + x: QuinticExtensionTarget, + root_x: QuinticExtensionTarget, + is_sqrt: BoolTarget, +} + +impl QuinticSqrtGenerator { + pub fn new( + x: QuinticExtensionTarget, + root_x: QuinticExtensionTarget, + is_sqrt: BoolTarget, + ) -> Self { + QuinticSqrtGenerator { x, root_x, is_sqrt } + } +} + +impl SimpleGenerator for QuinticSqrtGenerator { + fn id(&self) -> String { + "QuinticSqrtGenerator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + self.x.to_target_array().to_vec() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let x_limbs = self.x.to_target_array().map(|t| witness.get_target(t)); + let x = QuinticExtension::::from_basefield_array(x_limbs); + + match x.canonical_sqrt() { + Some(root_x) => { + for (lhs, rhs) in self + .root_x + .to_target_array() + .into_iter() + .zip(>::to_basefield_array(&root_x).into_iter()) + { + out_buffer.set_target(lhs, rhs); + } + out_buffer.set_target(self.is_sqrt.target, GFp::ONE); + } + None => { + for limb in self.root_x.to_target_array().into_iter() { + out_buffer.set_target(limb, GFp::ZERO); + } + out_buffer.set_target(self.is_sqrt.target, GFp::ZERO); + } + } + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::types::{Field, Sample}, + iop::witness::PartialWitness, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + use plonky2_field::types::PrimeField64; + use rand::thread_rng; + + use super::*; + use crate::{ + curve::{ + scalar_field::biguint_from_array, + test_utils::{gfp5_random_non_square, gfp5_random_sgn0_eq_0}, + }, + gadgets::scalar_field::{CircuitBuilderScalar, PartialWitnessScalar}, + }; + + #[test] + fn test_add() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x_expected = GFp5::sample(&mut rng); + let y_expected = GFp5::sample(&mut rng); + let z_expected = x_expected + y_expected; + + let x = builder.constant_quintic_ext(x_expected); + let y = builder.constant_quintic_ext(y_expected); + let z = builder.add_quintic_ext(x, y); + builder.register_quintic_ext_public_input(z); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(z, z_expected); + + let proof = 
circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_mul() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x_expected = GFp5::sample(&mut rng); + let y_expected = GFp5::sample(&mut rng); + let z_expected = x_expected * y_expected; + + let x = builder.constant_quintic_ext(x_expected); + let y = builder.constant_quintic_ext(y_expected); + let z = builder.mul_quintic_ext(x, y); + builder.register_quintic_ext_public_input(z); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(z, z_expected); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_sub() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x_expected = GFp5::sample(&mut rng); + let y_expected = GFp5::sample(&mut rng); + let z_expected = x_expected - y_expected; + + let x = builder.constant_quintic_ext(x_expected); + let y = builder.constant_quintic_ext(y_expected); + let z = builder.sub_quintic_ext(x, y); + builder.register_quintic_ext_public_input(z); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(z, z_expected); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_div() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x_expected = GFp5::sample(&mut rng); + let y_expected = GFp5::sample(&mut rng); + let z_expected = x_expected / y_expected; + + let x = builder.constant_quintic_ext(x_expected); + let y = builder.constant_quintic_ext(y_expected); + let z = builder.div_quintic_ext(x, y); + builder.register_quintic_ext_public_input(z); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(z, z_expected); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_inverse_quintic_ext() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x_expected = GFp5::sample(&mut rng); + let x_inv_expected = x_expected.inverse(); + + let x = builder.constant_quintic_ext(x_expected); + let x_inv = builder.inverse_quintic_ext(x); + builder.register_quintic_ext_public_input(x_inv); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(x_inv, x_inv_expected); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_any_sqrt_quintic_ext() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x = GFp5::sample(&mut rng); + let square_expected = x * x; + + builder.constant_quintic_ext(square_expected); + + let circuit = builder.build::(); + + let pw = 
PartialWitness::new(); + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + #[test] + + fn test_canonical_sqrt_quintic_ext() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let x = gfp5_random_sgn0_eq_0(); + let square_expected = x * x; + + let square = builder.constant_quintic_ext(square_expected); + let sqrt = builder.canonical_sqrt_quintic_ext(square); + builder.register_quintic_ext_public_input(sqrt); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(sqrt, x); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_try_any_sqrt_quintic_ext() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let zero = builder.zero_quintic_ext(); + let true_target = builder.constant_bool(true); + let false_target = builder.constant_bool(false); + + let x = GFp5::sample(&mut rng); + let square_expected = x * x; + + let square = builder.constant_quintic_ext(square_expected); + let (_, is_square) = builder.try_any_sqrt_quintic_ext(square); + builder.connect(true_target.target, is_square.target); + + let non_square = gfp5_random_non_square(); + let non_square = builder.constant_quintic_ext(non_square); + let (should_be_zero, is_square) = builder.try_any_sqrt_quintic_ext(non_square); + builder.connect(false_target.target, is_square.target); + builder.connect_quintic_ext(should_be_zero, zero); + + let circuit = builder.build::(); + + let pw = PartialWitness::new(); + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + #[test] + + fn test_try_canonical_sqrt_quintic_ext() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let zero = builder.zero_quintic_ext(); + let true_target = builder.constant_bool(true); + let false_target = builder.constant_bool(false); + + let x = gfp5_random_sgn0_eq_0(); + let square_expected = x * x; + + let square = builder.constant_quintic_ext(square_expected); + let (_, is_square) = builder.try_canonical_sqrt_quintic_ext(square); + builder.connect(true_target.target, is_square.target); + + let non_square = gfp5_random_non_square(); + let non_square = builder.constant_quintic_ext(non_square); + let (should_be_zero, is_square) = builder.try_canonical_sqrt_quintic_ext(non_square); + builder.connect(false_target.target, is_square.target); + builder.connect_quintic_ext(should_be_zero, zero); + + let circuit = builder.build::(); + + let pw = PartialWitness::new(); + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_legendre_sym_quintic_ext() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + + // legendre sym == 1 + let mut builder = CircuitBuilder::::new(config.clone()); + + let x = GFp5::sample(&mut rng); + let square = builder.constant_quintic_ext(x * x); + let legendre_sym = builder.legendre_sym_quintic_ext(square); + builder.register_public_input(legendre_sym); + + let circuit = builder.build::(); + + let mut pw = 
PartialWitness::new(); + pw.set_target(legendre_sym, GFp::ONE); + + let proof = circuit.prove(pw)?; + circuit.verify(proof)?; + + // legendre sym == -1 + let mut builder = CircuitBuilder::::new(config.clone()); + + let non_square = gfp5_random_non_square(); + let non_square = builder.constant_quintic_ext(non_square); + let legendre_sym = builder.legendre_sym_quintic_ext(non_square); + builder.register_public_input(legendre_sym); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_target(legendre_sym, GFp::NEG_ONE); + + let proof = circuit.prove(pw)?; + circuit.verify(proof)?; + + // legendre sym == 0 + let mut builder = CircuitBuilder::::new(config); + + let zero = builder.zero_quintic_ext(); + let legendre_sym = builder.legendre_sym_quintic_ext(zero); + builder.register_public_input(legendre_sym); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_target(legendre_sym, GFp::ZERO); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_encode_as_scalar() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + + let mut builder = CircuitBuilder::::new(config.clone()); + let x = GFp5::sample(&mut rng); + + let QuinticExtension(limbs) = x; + let encoded_expected = Scalar::from_noncanonical_biguint(biguint_from_array( + limbs.map(|l| l.to_canonical_u64()), + )); + + let x = builder.constant_quintic_ext(x); + let encoded = builder.encode_quintic_ext_as_scalar(x); + let encoded_as_biguint = builder.nonnative_to_canonical_biguint(&encoded); + builder.register_scalar_public_input(&encoded_as_biguint); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_scalar_target(&encoded_as_biguint, encoded_expected); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } +} diff --git a/ecgfp5/src/gadgets/curve.rs b/ecgfp5/src/gadgets/curve.rs new file mode 100644 index 0000000000..bc39a38ece --- /dev/null +++ b/ecgfp5/src/gadgets/curve.rs @@ -0,0 +1,663 @@ +use crate::{ + curve::{ + curve::{Point, WeierstrassPoint}, + scalar_field::Scalar, + GFp, GFp5, + }, + gadgets::base_field::{CircuitBuilderGFp5, QuinticExtensionTarget}, +}; +use plonky2::{ + field::types::Field, + hash::hash_types::RichField, + iop::{ + target::{BoolTarget, Target}, + witness::Witness, + }, + plonk::circuit_builder::CircuitBuilder, +}; +use plonky2_ecdsa::gadgets::{nonnative::NonNativeTarget, split_nonnative::CircuitBuilderSplit}; +use plonky2_field::extension::Extendable; + +use super::base_field::PartialWitnessQuinticExt; + +#[derive(Copy, Clone, Debug)] +#[repr(transparent)] +pub struct CurveTarget(pub ([QuinticExtensionTarget; 2], BoolTarget)); + +pub trait CircuitBuilderEcGFp5 { + fn add_virtual_curve_target(&mut self) -> CurveTarget; + fn register_curve_public_input(&mut self, point: CurveTarget); + fn curve_constant(&mut self, point: WeierstrassPoint) -> CurveTarget; + fn curve_zero(&mut self) -> CurveTarget; + fn curve_generator(&mut self) -> CurveTarget; + + fn curve_eq(&mut self, a: CurveTarget, b: CurveTarget) -> BoolTarget; + fn curve_select(&mut self, cond: BoolTarget, a: CurveTarget, b: CurveTarget) -> CurveTarget; + fn curve_random_access(&mut self, access_index: Target, v: &[CurveTarget]) -> CurveTarget; + + fn curve_add(&mut self, a: CurveTarget, b: CurveTarget) -> CurveTarget; + fn curve_add_spec(&mut self, a: CurveTarget, b: CurveTarget) -> 
CurveTarget; + fn curve_double(&mut self, a: CurveTarget) -> CurveTarget; + + fn precompute_window(&mut self, a: CurveTarget, window_bits: usize) -> Vec; + fn curve_scalar_mul(&mut self, a: CurveTarget, scalar: &NonNativeTarget) + -> CurveTarget; + + fn precompute_window_const(&mut self, point: Point, window_bits: usize) -> Vec; + fn curve_scalar_mul_const( + &mut self, + point: Point, + scalar: &NonNativeTarget, + ) -> CurveTarget; + + fn curve_encode_to_quintic_ext(&mut self, a: CurveTarget) -> QuinticExtensionTarget; + fn curve_decode_from_quintic_ext(&mut self, w: QuinticExtensionTarget) -> CurveTarget; + + fn curve_muladd_2( + &mut self, + a: CurveTarget, + b: CurveTarget, + scalar_a: &NonNativeTarget, + scalar_b: &NonNativeTarget, + ) -> CurveTarget; + + fn curve_assert_not_zero(&mut self, point: CurveTarget); +} + +macro_rules! impl_circuit_builder_for_extension_degree { + ($degree:literal) => { + impl CircuitBuilderEcGFp5 for CircuitBuilder { + fn curve_assert_not_zero(&mut self, point: CurveTarget) { + let zero_point = self.curve_zero(); + let is_equal = self.curve_eq(point, zero_point); + let zero = self.zero(); + self.connect(is_equal.target, zero); + } + + fn add_virtual_curve_target(&mut self) -> CurveTarget { + let x = self.add_virtual_quintic_ext_target(); + let y = self.add_virtual_quintic_ext_target(); + let is_inf = self.add_virtual_bool_target_safe(); + CurveTarget(([x, y], is_inf)) + } + + fn register_curve_public_input(&mut self, point: CurveTarget) { + let CurveTarget(([x, y], is_inf)) = point; + self.register_quintic_ext_public_input(x); + self.register_quintic_ext_public_input(y); + self.register_public_input(is_inf.target); + } + + fn curve_constant(&mut self, point: WeierstrassPoint) -> CurveTarget { + let WeierstrassPoint { x, y, is_inf } = point; + + let x = self.constant_quintic_ext(x); + let y = self.constant_quintic_ext(y); + let is_inf = self.constant_bool(is_inf); + CurveTarget(([x, y], is_inf)) + } + + fn curve_zero(&mut self) -> CurveTarget { + self.curve_constant(WeierstrassPoint::NEUTRAL) + } + + fn curve_generator(&mut self) -> CurveTarget { + self.curve_constant(WeierstrassPoint::GENERATOR) + } + + fn curve_eq(&mut self, a: CurveTarget, b: CurveTarget) -> BoolTarget { + let CurveTarget(([ax, ay], a_is_inf)) = a; + let CurveTarget(([bx, by], b_is_inf)) = b; + + let both_inf = self.and(a_is_inf, b_is_inf); + + let x_eq = self.is_equal_quintic_ext(ax, bx); + let y_eq = self.is_equal_quintic_ext(ay, by); + let both_eq = self.and(x_eq, y_eq); + + self.or(both_inf, both_eq) + } + + fn curve_select( + &mut self, + cond: BoolTarget, + a: CurveTarget, + b: CurveTarget, + ) -> CurveTarget { + let CurveTarget(([ax, ay], a_is_inf)) = a; + let CurveTarget(([bx, by], b_is_inf)) = b; + CurveTarget(( + [self.select_quintic_ext(cond, ax, bx), self.select_quintic_ext(cond, ay, by)], + BoolTarget::new_unsafe(self.select(cond, a_is_inf.target, b_is_inf.target)), + )) + } + + fn curve_random_access( + &mut self, + access_index: Target, + v: &[CurveTarget], + ) -> CurveTarget { + let mut xs = Vec::new(); + let mut ys = Vec::new(); + let mut is_infs = Vec::new(); + for &CurveTarget(([x, y], is_inf)) in v { + xs.push(x); + ys.push(y); + is_infs.push(is_inf.target); + } + + CurveTarget(( + [ + self.random_access_quintic_ext(access_index, &xs), + self.random_access_quintic_ext(access_index, &ys), + ], + BoolTarget::new_unsafe(self.random_access(access_index, is_infs)), + )) + } + + fn curve_add(&mut self, a: CurveTarget, b: CurveTarget) -> CurveTarget { + let CurveTarget(([x1, 
y1], a_is_inf)) = a; + let CurveTarget(([x2, y2], b_is_inf)) = b; + + // note: paper has a typo. sx == 1 when x1 != x2, not when x1 == x2 + let x_same = self.is_equal_quintic_ext(x1, x2); + let mut y_diff = self.is_equal_quintic_ext(y1, y2); + y_diff = self.not(y_diff); + + let lambda_0_if_x_not_same = self.sub_quintic_ext(y2, y1); + + let mut lambda_0_if_x_same = + self.weighted_mul_quintic_ext(GFp::from_canonical_u8(3), x1, x1); + lambda_0_if_x_same = + self.add_const_quintic_ext(lambda_0_if_x_same, WeierstrassPoint::A); + + let lambda_1_if_x_not_same = self.sub_quintic_ext(x2, x1); + let lambda_1_if_x_same = self.double_quintic_ext(y1); + + let lambda_0 = + self.select_quintic_ext(x_same, lambda_0_if_x_same, lambda_0_if_x_not_same); + let lambda_1 = + self.select_quintic_ext(x_same, lambda_1_if_x_same, lambda_1_if_x_not_same); + let lambda = self.div_or_zero_quintic_ext(lambda_0, lambda_1); + + let mut x3 = self.square_quintic_ext(lambda); + x3 = self.sub_quintic_ext(x3, x1); + x3 = self.sub_quintic_ext(x3, x2); + + let mut y3 = self.sub_quintic_ext(x1, x3); + y3 = self.mul_quintic_ext(lambda, y3); + y3 = self.sub_quintic_ext(y3, y1); + + let c_is_inf = self.and(x_same, y_diff); + let c = CurveTarget(([x3, y3], c_is_inf)); + + let sel = self.curve_select(a_is_inf, b, c); + self.curve_select(b_is_inf, a, sel) + } + + fn curve_add_spec(&mut self, a: CurveTarget, b: CurveTarget) -> CurveTarget { + let CurveTarget(([x1, y1], _)) = a; + let CurveTarget(([x2, y2], _)) = b; + + let lambda_0 = self.sub_quintic_ext(y2, y1); + let lambda_1 = self.sub_quintic_ext(x2, x1); + let lambda = self.div_or_zero_quintic_ext(lambda_0, lambda_1); + + let mut x3 = self.square_quintic_ext(lambda); + x3 = self.sub_quintic_ext(x3, x1); + x3 = self.sub_quintic_ext(x3, x2); + + let mut y3 = self.sub_quintic_ext(x1, x3); + y3 = self.mul_quintic_ext(lambda, y3); + y3 = self.sub_quintic_ext(y3, y1); + + CurveTarget(([x3, y3], BoolTarget::new_unsafe(self.zero()))) + } + + fn curve_double(&mut self, a: CurveTarget) -> CurveTarget { + let CurveTarget(([x, y], is_inf)) = a; + + let mut lambda_0 = self.weighted_mul_quintic_ext(GFp::from_canonical_u8(3), x, x); + lambda_0 = self.add_const_quintic_ext(lambda_0, WeierstrassPoint::A); + let lambda_1 = self.double_quintic_ext(y); + + let lambda = self.div_or_zero_quintic_ext(lambda_0, lambda_1); + + let mut x2 = self.square_quintic_ext(lambda); + let two_x = self.double_quintic_ext(x); + x2 = self.sub_quintic_ext(x2, two_x); + + let mut y2 = self.sub_quintic_ext(x, x2); + y2 = self.mul_quintic_ext(lambda, y2); + y2 = self.sub_quintic_ext(y2, y); + + CurveTarget(([x2, y2], is_inf)) + } + + fn precompute_window( + &mut self, + a: CurveTarget, + window_bits: usize, + ) -> Vec { + debug_assert!(window_bits > 1); + let mut multiples = vec![self.curve_zero()]; + multiples.push(a); + multiples.push(self.curve_double(a)); + + for _ in 3..(1 << window_bits) { + multiples.push(self.curve_add(multiples.last().unwrap().clone(), a)); + } + + multiples + } + + // TODO optimize + fn curve_scalar_mul( + &mut self, + a: CurveTarget, + scalar: &NonNativeTarget, + ) -> CurveTarget { + let window = self.precompute_window(a, 4); + let four_bit_limbs = self.split_nonnative_to_4_bit_limbs(&scalar); + + let num_limbs = four_bit_limbs.len(); + let mut res = self.curve_random_access(four_bit_limbs[num_limbs - 1], &window); + for limb in four_bit_limbs.into_iter().rev().skip(1) { + for _ in 0..4 { + res = self.curve_double(res); + } + + let addend = self.curve_random_access(limb, &window); + res = 
self.curve_add(res, addend); + } + + res + } + + fn precompute_window_const( + &mut self, + point: Point, + window_bits: usize, + ) -> Vec { + let mut curr = point; + let mut multiples = vec![self.curve_zero()]; + + for _ in 1..(1 << window_bits) { + multiples.push(self.curve_constant(curr.to_weierstrass())); + curr += point; + } + + multiples + } + + fn curve_scalar_mul_const( + &mut self, + point: Point, + scalar: &NonNativeTarget, + ) -> CurveTarget { + let window = self.precompute_window_const(point, 4); + let four_bit_limbs = self.split_nonnative_to_4_bit_limbs(&scalar); + + let num_limbs = four_bit_limbs.len(); + let mut res = self.curve_random_access(four_bit_limbs[num_limbs - 1], &window); + for limb in four_bit_limbs.into_iter().rev().skip(1) { + for _ in 0..4 { + res = self.curve_double(res); + } + + let addend = self.curve_random_access(limb, &window); + res = self.curve_add(res, addend); + } + + res + } + + // TODO: optimize to use base field when we know it's in the base field + fn curve_encode_to_quintic_ext(&mut self, a: CurveTarget) -> QuinticExtensionTarget { + let CurveTarget(([x, y], is_inf)) = a; + let adiv3 = self.constant_quintic_ext(GFp5::TWO / GFp5::from_canonical_u16(3)); + let denom = self.sub_quintic_ext(adiv3, x); + let w = self.div_or_zero_quintic_ext(y, denom); + + let zero = self.zero_quintic_ext(); + self.select_quintic_ext(is_inf, zero, w) + } + + // TODO: optimize to use base field when we know it's in the base field + fn curve_decode_from_quintic_ext(&mut self, w: QuinticExtensionTarget) -> CurveTarget { + let one = self.one(); + let zero_quintic_ext = self.zero_quintic_ext(); + let a = self.constant_quintic_ext(Point::A); + let bmul4 = self.constant_quintic_ext(Point::B_MUL4); + + let mut e = self.square_quintic_ext(w); + e = self.sub_quintic_ext(e, a); + + let mut delta = self.square_quintic_ext(e); + delta = self.sub_quintic_ext(delta, bmul4); + + let (r, delta_is_sqrt) = self.try_any_sqrt_quintic_ext(delta); + + // if delta is not a sqrt, then w must be zero. otherwise, it's not a valid point encoding + // we check this by asserting that delta_is_sqrt OR w == 0. 
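+            // (w == 0 is exactly the encoding of the neutral point: `curve_encode_to_quintic_ext`
+            // above selects 0 when `is_inf` is set, so it is the only input with a non-square
+            // delta that we accept.)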
+ let w_is_zero = self.is_equal_quintic_ext(w, zero_quintic_ext); + let delta_is_sqrt_or_w_is_zero = self.or(delta_is_sqrt, w_is_zero); + self.assert_bool(delta_is_sqrt_or_w_is_zero); + + let mut x1 = self.add_quintic_ext(e, r); + x1 = self.div_const_quintic_ext(x1, GFp5::TWO); + + let mut x2 = self.sub_quintic_ext(e, r); + x2 = self.div_const_quintic_ext(x2, GFp5::TWO); + + let legendre_x1 = self.legendre_sym_quintic_ext(x1); + let legendre_is_one = self.is_equal(legendre_x1, one); + let x = self.select_quintic_ext(legendre_is_one, x1, x2); + + let negw = self.neg_quintic_ext(w); + let y = self.mul_quintic_ext(negw, x); + + let x = self.add_const_quintic_ext(x, Point::A / GFp5::from_canonical_u16(3)); + // since we checked above that w is zero if delta is not a sqrt, we can just set is_inf to delta_is_not_sqrt + let is_inf = self.not(delta_is_sqrt); + CurveTarget(([x, y], is_inf)) + } + + fn curve_muladd_2( + &mut self, + a: CurveTarget, + b: CurveTarget, + scalar_a: &NonNativeTarget, + scalar_b: &NonNativeTarget, + ) -> CurveTarget { + let a_window = self.precompute_window(a, 4); + let a_four_bit_limbs = self.split_nonnative_to_4_bit_limbs(&scalar_a); + + let b_window = self.precompute_window(b, 4); + let b_four_bit_limbs = self.split_nonnative_to_4_bit_limbs(&scalar_b); + + debug_assert!(a_four_bit_limbs.len() == b_four_bit_limbs.len()); + + let num_limbs = a_four_bit_limbs.len(); + let a_start = self.curve_random_access(a_four_bit_limbs[num_limbs - 1], &a_window); + let b_start = self.curve_random_access(b_four_bit_limbs[num_limbs - 1], &b_window); + let mut res = self.curve_add(a_start, b_start); + + for (a_limb, b_limb) in + a_four_bit_limbs.into_iter().zip(b_four_bit_limbs).rev().skip(1) + { + for _ in 0..4 { + res = self.curve_double(res); + } + + let a_addend = self.curve_random_access(a_limb, &a_window); + let b_addend = self.curve_random_access(b_limb, &b_window); + let addend = self.curve_add(a_addend, b_addend); + res = self.curve_add(res, addend); + } + + res + } + } + }; +} + +// impl_circuit_builder_for_extension_degree!(1); +impl_circuit_builder_for_extension_degree!(2); +// impl_circuit_builder_for_extension_degree!(4); +// impl_circuit_builder_for_extension_degree!(5); + +pub trait PartialWitnessCurve>: Witness { + fn get_curve_target(&self, target: CurveTarget) -> WeierstrassPoint; + fn get_curve_targets(&self, targets: &[CurveTarget]) -> Vec { + targets.iter().map(|&t| self.get_curve_target(t)).collect() + } + + fn set_curve_target(&mut self, target: CurveTarget, value: WeierstrassPoint); + + fn set_curve_targets(&mut self, targets: &[CurveTarget], values: &[WeierstrassPoint]) { + for (&t, &v) in targets.iter().zip(values.iter()) { + self.set_curve_target(t, v); + } + } +} + +impl> PartialWitnessCurve for W { + fn get_curve_target(&self, target: CurveTarget) -> WeierstrassPoint { + let CurveTarget(([x, y], is_inf)) = target; + let x = self.get_quintic_ext_target(x); + let y = self.get_quintic_ext_target(y); + let is_inf = self.get_bool_target(is_inf); + WeierstrassPoint { x, y, is_inf } + } + + fn set_curve_target(&mut self, target: CurveTarget, value: WeierstrassPoint) { + let CurveTarget(([x, y], is_inf)) = target; + self.set_quintic_ext_target(x, value.x); + self.set_quintic_ext_target(y, value.y); + self.set_bool_target(is_inf, value.is_inf); + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::types::Sample, + iop::witness::PartialWitness, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, 
+ }, + }; + use plonky2_ecdsa::gadgets::nonnative::CircuitBuilderNonNative; + use rand::thread_rng; + + use crate::curve::curve::Point; + + use super::*; + + #[test] + fn test_curve_add() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p1 = Point::sample(&mut rng); + let p2 = Point::sample(&mut rng); + let p3_expected = p1 + p2; + + let p1 = builder.curve_constant(p1.to_weierstrass()); + let p2 = builder.curve_constant(p2.to_weierstrass()); + let p3 = builder.curve_add(p1, p2); + builder.register_curve_public_input(p3); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(p3, p3_expected.to_weierstrass()); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_curve_double() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p1 = Point::sample(&mut rng); + let p2_expected = p1.double(); + + let p1 = builder.curve_constant(p1.to_weierstrass()); + let p2 = builder.curve_double(p1); + builder.register_curve_public_input(p2); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(p2, p2_expected.to_weierstrass()); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_curve_scalar_mul() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p = Point::sample(&mut rng); + let s = Scalar::sample(&mut rng); + let prod_expected = p * s; + + let p = builder.curve_constant(p.to_weierstrass()); + let s = builder.constant_nonnative(s); + + let prod = builder.curve_scalar_mul(p, &s); + builder.register_curve_public_input(prod); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(prod, prod_expected.to_weierstrass()); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_curve_scalar_mul_const() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p = Point::sample(&mut rng); + let s = Scalar::sample(&mut rng); + let prod_expected = p * s; + + let s = builder.constant_nonnative(s); + + let prod = builder.curve_scalar_mul_const(p, &s); + builder.register_curve_public_input(prod); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(prod, prod_expected.to_weierstrass()); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_curve_encode() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p = Point::sample(&mut rng); + let w_expected = p.encode(); + + let p = builder.curve_constant(p.to_weierstrass()); + let w = builder.curve_encode_to_quintic_ext(p); + 
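+        // `w` is the one-element GF(p^5) encoding produced by `curve_encode_to_quintic_ext`:
+        // y / (a/3 - x) for an affine point (the gadget's `adiv3` constant) and 0 for the
+        // neutral point, so it should match the native `Point::encode()` value computed above.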
builder.register_quintic_ext_public_input(w); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_quintic_ext_target(w, w_expected); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_curve_decode() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p_expected = Point::sample(&mut rng); + let w = p_expected.encode(); + + let w = builder.constant_quintic_ext(w); + let p = builder.curve_decode_from_quintic_ext(w); + builder.register_curve_public_input(p); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(p, p_expected.to_weierstrass()); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } + + #[test] + fn test_curve_muladd_2() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let mut rng = thread_rng(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + + let p1 = Point::sample(&mut rng); + let p2 = Point::sample(&mut rng); + let s1 = Scalar::sample(&mut rng); + let s2 = Scalar::sample(&mut rng); + let prod_expected = p1 * s1 + p2 * s2; + + let p1 = builder.curve_constant(p1.to_weierstrass()); + let s1 = builder.constant_nonnative(s1); + + let p2 = builder.curve_constant(p2.to_weierstrass()); + let s2 = builder.constant_nonnative(s2); + + let prod = builder.curve_muladd_2(p1, p2, &s1, &s2); + builder.register_curve_public_input(prod); + + let circuit = builder.build::(); + + let mut pw = PartialWitness::new(); + pw.set_curve_target(prod, prod_expected.to_weierstrass()); + + let proof = circuit.prove(pw)?; + circuit.verify(proof) + } +} diff --git a/ecgfp5/src/gadgets/mod.rs b/ecgfp5/src/gadgets/mod.rs new file mode 100644 index 0000000000..2d2ef235a4 --- /dev/null +++ b/ecgfp5/src/gadgets/mod.rs @@ -0,0 +1,4 @@ +pub mod base_field; +pub mod curve; +pub mod scalar_field; +pub mod schnorr; diff --git a/ecgfp5/src/gadgets/scalar_field.rs b/ecgfp5/src/gadgets/scalar_field.rs new file mode 100644 index 0000000000..81205369bc --- /dev/null +++ b/ecgfp5/src/gadgets/scalar_field.rs @@ -0,0 +1,51 @@ +use num::BigUint; +use plonky2::{ + hash::hash_types::RichField, + iop::witness::{PartialWitness, Witness, WitnessWrite}, + plonk::circuit_builder::CircuitBuilder, +}; +use plonky2_ecdsa::gadgets::biguint::BigUintTarget; +use plonky2_field::{ + extension::Extendable, + types::{Field, PrimeField}, +}; + +use crate::curve::scalar_field::Scalar; + +pub trait CircuitBuilderScalar, const D: usize> { + fn register_scalar_public_input(&mut self, scalar: &BigUintTarget); +} + +impl, const D: usize> CircuitBuilderScalar + for CircuitBuilder +{ + fn register_scalar_public_input(&mut self, target: &BigUintTarget) { + for limb in target.limbs.iter() { + self.register_public_input(limb.0); + } + } +} + +pub trait PartialWitnessScalar { + fn set_scalar_target(&mut self, target: &BigUintTarget, value: Scalar); + fn get_scalar_target(&mut self, target: &BigUintTarget) -> Scalar; +} + +impl PartialWitnessScalar for PartialWitness { + fn set_scalar_target(&mut self, target: &BigUintTarget, value: Scalar) { + let value = value.to_canonical_biguint(); + for (&limb, limb_value) in target.limbs.iter().zip(value.to_u32_digits()) { + self.set_target(limb.0, F::from_canonical_u32(limb_value)); + } + } + + 
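+    // Scalars are exchanged with `BigUintTarget`s as little-endian u32 limbs
+    // (value = sum_i limb_i * 2^(32 * i)). Note that `to_u32_digits()` omits high
+    // zero limbs, so `set_scalar_target` only writes the limbs actually present in
+    // the canonical representation of `value`.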
fn get_scalar_target(&mut self, target: &BigUintTarget) -> Scalar { + let mut limbs = Vec::new(); + for limb in target.limbs.iter() { + limbs.push(self.get_target(limb.0).to_canonical_u64() as u32); + } + + let as_biguint = BigUint::from_slice(&limbs); + Scalar::from_noncanonical_biguint(as_biguint) + } +} diff --git a/ecgfp5/src/gadgets/schnorr.rs b/ecgfp5/src/gadgets/schnorr.rs new file mode 100644 index 0000000000..457e64f215 --- /dev/null +++ b/ecgfp5/src/gadgets/schnorr.rs @@ -0,0 +1,161 @@ +#![allow(dead_code)] + +use crate::{ + curve::{curve::Point, scalar_field::Scalar}, + gadgets::curve::CircuitBuilderEcGFp5, +}; +use plonky2::{ + field::types::Field, + hash::{ + hashing::hash_n_to_m_no_pad, + poseidon::{PoseidonHash, PoseidonPermutation}, + }, + iop::target::Target, + plonk::{ + circuit_builder::CircuitBuilder, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, +}; +use plonky2_ecdsa::gadgets::nonnative::CircuitBuilderNonNative; +use plonky2_field::{ + extension::quintic::QuinticExtension, goldilocks_field::GoldilocksField, types::Sample, +}; +use rand::RngCore; + +use super::base_field::{CircuitBuilderGFp5, QuinticExtensionTarget}; + +pub const D: usize = 2; +pub type C = PoseidonGoldilocksConfig; +pub type F = >::F; + +#[derive(Clone, Debug)] +pub struct SchnorrSecretKey(pub(crate) Scalar); + +#[derive(Clone, Debug)] +pub struct SchnorrPublicKey(pub(crate) Point); + +#[derive(Clone, Debug)] +pub struct SchnorrSignature { + pub(crate) s: Scalar, + pub(crate) e: Scalar, +} + +pub fn schnorr_keygen(rng: &mut dyn RngCore) -> (SchnorrPublicKey, SchnorrSecretKey) { + let sk = Scalar::sample(rng); + let pk = Point::GENERATOR * sk; + (SchnorrPublicKey(pk), SchnorrSecretKey(sk)) +} + +pub fn schnorr_sign( + message: &[GoldilocksField], + sk: &SchnorrSecretKey, + rng: &mut dyn RngCore, +) -> SchnorrSignature { + // sample random k + let k = Scalar::sample(rng); + // compute r = k*G + let r = k * Point::GENERATOR; + // e = H(r || M) + let mut preimage = r.encode().0.to_vec(); + preimage.extend(message.iter()); + let e_elems = hash(&preimage); + let e = Scalar::from_gfp5(QuinticExtension(e_elems)); + // s = k - e*sk + let s = k - e * sk.0; + + // signature = (s, e) + SchnorrSignature { s, e } +} + +pub fn schnorr_verify_rust( + message: &[GoldilocksField], + pk: &SchnorrPublicKey, + sig: &SchnorrSignature, +) -> bool { + let r = sig.s * Point::GENERATOR + sig.e * pk.0; + let mut preimage = r.encode().0.to_vec(); + preimage.extend(message.iter()); + let e_elems = hash(&preimage); + let e = Scalar::from_gfp5(QuinticExtension(e_elems)); + e == sig.e +} + +pub fn schnorr_verify_circuit( + builder: &mut CircuitBuilder, + message: &[GoldilocksField], + pk: &SchnorrPublicKey, + sig: &SchnorrSignature, +) { + let message = builder.constants(message); + + let s = builder.constant_nonnative::(sig.s); + let e = builder.constant_nonnative::(sig.e); + let g = builder.curve_generator(); + let pk_target = builder.curve_constant(pk.0.to_weierstrass()); + + // r_v = s*G + e*pk + let r_v = builder.curve_muladd_2(g, pk_target, &s, &e); + + // e_v = H(r_v || M) + let mut preimage = builder.curve_encode_to_quintic_ext(r_v).0.to_vec(); + preimage.extend(message); + let e_v_ext = QuinticExtensionTarget(hash_target(builder, &preimage)); + let e_v = builder.encode_quintic_ext_as_scalar(e_v_ext); + + // check e_v == e + builder.connect_nonnative(&e, &e_v); +} + +/// we define a hash function whose digest is 5 GFp5 elems +/// +/// note: this doesn't apply any padding, so this is vulnerable to length extension 
attacks +fn hash(message: &[F]) -> [F; 5] { + let mut res = [F::ZERO; 5]; + let out = hash_n_to_m_no_pad::>(message, 5); + res.copy_from_slice(&out[..5]); + + res +} + +fn hash_target(builder: &mut CircuitBuilder, message: &[Target]) -> [Target; 5] { + builder.hash_n_to_m_no_pad::(message.to_vec(), 5).try_into().unwrap() +} + +#[cfg(test)] +mod tests { + use plonky2::{iop::witness::PartialWitness, plonk::circuit_data::CircuitConfig}; + use rand::thread_rng; + + use super::*; + + #[test] + fn test_verify_rust() { + let mut rng = thread_rng(); + let (pk, sk) = schnorr_keygen(&mut rng); + let message = b"Hello, world!"; + let message_f = message.map(|b| F::from_canonical_u8(b)); + let sig = schnorr_sign(&message_f, &sk, &mut rng); + assert!(schnorr_verify_rust(&message_f, &pk, &sig)); + } + + #[test] + fn test_verify_circuit() { + // keygen and sign + let mut rng = thread_rng(); + let (pk, sk) = schnorr_keygen(&mut rng); + let message = b"Hello, world!"; + let message_f = message.map(|b| F::from_canonical_u8(b)); + let sig = schnorr_sign(&message_f, &sk, &mut rng); + + // Verify in circuit + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + schnorr_verify_circuit(&mut builder, &message_f, &pk, &sig); + // build circuit + builder.print_gate_counts(0); + let pw = PartialWitness::new(); + let circuit = builder.build::(); + let proof = circuit.prove(pw).unwrap(); + circuit.verify(proof).expect("verifier failed"); + } +} diff --git a/ecgfp5/src/gates/gfp5_mul.rs b/ecgfp5/src/gates/gfp5_mul.rs new file mode 100644 index 0000000000..5030075c71 --- /dev/null +++ b/ecgfp5/src/gates/gfp5_mul.rs @@ -0,0 +1,377 @@ +use alloc::vec::Vec; +use core::ops::Range; + +use plonky2::{ + gates::{gate::Gate, util::StridedConstraintConsumer}, + hash::hash_types::RichField, + iop::{ + ext_target::ExtensionTarget, + generator::{GeneratedValues, SimpleGenerator, WitnessGenerator, WitnessGeneratorRef}, + target::Target, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::CircuitConfig, + vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}, + }, +}; +use plonky2_field::{extension::Extendable, types::Field}; + +/// A gate which can perform a weighted multiply, i.e. `result = c0 x y`. If the config +/// supports enough routed wires, it can support several such operations in one gate. +#[derive(Debug, Clone)] +pub struct MulGFp5Gate { + /// Number of arithmetic operations performed by an arithmetic gate. + pub num_ops: usize, +} + +// extension degree of the GFp5 +const DEGREE: usize = 5; + +// need 20 wires per operation +// each element needs 5 wires +// each operation needs 3 elements (multiplicand 0, multiplicand 1, output) +const WIRES_PER_OP: usize = 3 * DEGREE; + +impl MulGFp5Gate { + pub fn new_from_config(config: &CircuitConfig) -> Self { + Self { num_ops: Self::num_ops(config) } + } + + /// Determine the maximum number of operations that can fit in one gate for the given config. 
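+    /// Each op routes 3 * DEGREE = 15 wires (two GF(p^5) multiplicands and the product),
+    /// so e.g. a config with 80 routed wires fits 80 / 15 = 5 multiplications per gate.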
+ pub(crate) fn num_ops(config: &CircuitConfig) -> usize { + config.num_routed_wires / WIRES_PER_OP + } + + pub fn wires_ith_multiplicand_0(i: usize) -> Range { + 3 * DEGREE * i..3 * DEGREE * i + DEGREE + } + pub fn wires_ith_multiplicand_1(i: usize) -> Range { + 3 * DEGREE * i + DEGREE..3 * DEGREE * i + 2 * DEGREE + } + pub fn wires_ith_output(i: usize) -> Range { + 3 * DEGREE * i + 2 * DEGREE..3 * DEGREE * i + 3 * DEGREE + } +} + +impl, const D: usize> Gate for MulGFp5Gate { + fn id(&self) -> String { + format!("{self:?}") + } + fn export_circom_verification_code(&self) -> String { + todo!(); + } + fn export_solidity_verification_code(&self) -> String { + todo!(); + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let c = vars.local_constants[0]; + + let mut constraints = Vec::new(); + for i in 0..self.num_ops { + let multiplicand_0_limbs: [F::Extension; 5] = + vars.local_wires[Self::wires_ith_multiplicand_0(i)].try_into().unwrap(); + let multiplicand_1_limbs: [F::Extension; 5] = + vars.local_wires[Self::wires_ith_multiplicand_1(i)].try_into().unwrap(); + let output_limbs: [F::Extension; 5] = + vars.local_wires[Self::wires_ith_output(i)].try_into().unwrap(); + + let prod_limbs = gfp5_mul_limbwise(multiplicand_0_limbs, multiplicand_1_limbs); + let computed_output_limbs = gfp5_scalar_mul_limbwise(c, prod_limbs); + + for (output_limb, computed_output_limb) in + output_limbs.into_iter().zip(computed_output_limbs) + { + constraints.push(output_limb - computed_output_limb); + } + } + + constraints + } + + fn eval_unfiltered_base_one( + &self, + vars: EvaluationVarsBase, + mut yield_constr: StridedConstraintConsumer, + ) { + let const_limbs = vars.local_constants[0]; + + for i in 0..self.num_ops { + let multiplicand_0_limbs = + vars.local_wires.view(Self::wires_ith_multiplicand_0(i)).try_into().unwrap(); + let multiplicand_1_limbs = + vars.local_wires.view(Self::wires_ith_multiplicand_1(i)).try_into().unwrap(); + let output_limbs: [F; 5] = + vars.local_wires.view(Self::wires_ith_output(i)).try_into().unwrap(); + + let prod_limbs = gfp5_mul_limbwise(multiplicand_0_limbs, multiplicand_1_limbs); + let computed_output_limbs = gfp5_scalar_mul_limbwise(const_limbs, prod_limbs); + + for (output_limb, computed_output_limb) in + output_limbs.into_iter().zip(computed_output_limbs) + { + yield_constr.one(output_limb - computed_output_limb); + } + } + } + + fn eval_unfiltered_circuit( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let c = vars.local_constants[0]; + + let mut constraints = Vec::new(); + for i in 0..self.num_ops { + let multiplicand_0_limbs: [ExtensionTarget; 5] = + vars.local_wires[Self::wires_ith_multiplicand_0(i)].try_into().unwrap(); + let multiplicand_1_limbs: [ExtensionTarget; 5] = + vars.local_wires[Self::wires_ith_multiplicand_1(i)].try_into().unwrap(); + let output_limbs: [ExtensionTarget; 5] = + vars.local_wires[Self::wires_ith_output(i)].try_into().unwrap(); + + let prod_limbs = gfp5_mul_limbwise_circuit_lifted( + builder, + multiplicand_0_limbs, + multiplicand_1_limbs, + ); + let computed_output_limbs = + 
gfp5_scalar_mul_limbwise_circuit_lifted(builder, c, prod_limbs); + + for (output_limb, computed_output_limb) in + output_limbs.into_iter().zip(computed_output_limbs) + { + let diff = builder.sub_extension(output_limb, computed_output_limb); + constraints.push(diff); + } + } + + constraints + } + + fn generators(&self, row: usize, local_constants: &[F]) -> Vec> { + (0..self.num_ops) + .map(|op_idx| { + let g: Box> = + Box::new(MulGFp5Generator { row, c: local_constants[0], op_idx }.adapter()); + WitnessGeneratorRef(g) + }) + .collect() + } + + fn num_wires(&self) -> usize { + self.num_ops * WIRES_PER_OP + } + + fn num_constants(&self) -> usize { + 1 + } + + fn degree(&self) -> usize { + 3 + } + + fn num_constraints(&self) -> usize { + self.num_ops * DEGREE + } +} + +#[derive(Clone, Debug)] +pub struct MulGFp5Generator, const D: usize> { + row: usize, + c: F, + op_idx: usize, +} + +impl, const D: usize> SimpleGenerator + for MulGFp5Generator +{ + fn id(&self) -> String { + "MulGFp5Generator".to_string() + } + fn serialize( + &self, + _dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + todo!(); + } + fn deserialize( + _src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + todo!(); + } + fn dependencies(&self) -> Vec { + MulGFp5Gate::wires_ith_multiplicand_0(self.op_idx) + .chain(MulGFp5Gate::wires_ith_multiplicand_1(self.op_idx)) + .map(|wire| Target::wire(self.row, wire)) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let get_wire = |wire: usize| -> F { witness.get_target(Target::wire(self.row, wire)) }; + + let multiplicand_0_limbs: [F; 5] = MulGFp5Gate::wires_ith_multiplicand_0(self.op_idx) + .map(|wire| get_wire(wire)) + .collect::>() + .try_into() + .unwrap(); + let multiplicand_1_limbs: [F; 5] = MulGFp5Gate::wires_ith_multiplicand_1(self.op_idx) + .map(|wire| get_wire(wire)) + .collect::>() + .try_into() + .unwrap(); + let output_limbs = + MulGFp5Gate::wires_ith_output(self.op_idx).map(|wire| Target::wire(self.row, wire)); + + let prod_limbs = gfp5_mul_limbwise(multiplicand_0_limbs, multiplicand_1_limbs); + let computed_output_limbs = gfp5_scalar_mul_limbwise(self.c, prod_limbs); + + for (output_limb, computed_output_limb) in + output_limbs.into_iter().zip(computed_output_limbs) + { + out_buffer.set_target(output_limb, computed_output_limb); + } + } +} + +fn gfp5_mul_limbwise(a: [F; 5], b: [F; 5]) -> [F; 5] { + let [a0, a1, a2, a3, a4] = a; + let [b0, b1, b2, b3, b4] = b; + + // c0 ← a0b0 + 3(a1b4 + a2b3 + a3b2 + a4b1) + // c1 ← a0b1 + a1b0 + 3(a2b4 + a3b3 + a4b2) + // c2 ← a0b2 + a1b1 + a2b0 + 3(a3b4 + a4b3) + // c3 ← a0b3 + a1b2 + a2b1 + a3b0 + 3a4b4 + // c4 ← a0b4 + a1b3 + a2b2 + a3b1 + a4b0 + + let three = F::from_canonical_u16(3); + + let c0 = a0 * b0 + three * (a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1); + let c1 = a0 * b1 + a1 * b0 + three * (a2 * b4 + a3 * b3 + a4 * b2); + let c2 = a0 * b2 + a1 * b1 + a2 * b0 + three * (a3 * b4 + a4 * b3); + let c3 = a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0 + three * a4 * b4; + let c4 = a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0; + + [c0, c1, c2, c3, c4] +} + +fn gfp5_scalar_mul_limbwise(c: F, a: [F; 5]) -> [F; 5] { + [c * a[0], c * a[1], c * a[2], c * a[3], c * a[4]] +} + +fn gfp5_mul_limbwise_circuit_lifted, const D: usize>( + builder: &mut CircuitBuilder, + a: 
[ExtensionTarget; 5], + b: [ExtensionTarget; 5], +) -> [ExtensionTarget; 5] { + let [a0, a1, a2, a3, a4] = a; + let [b0, b1, b2, b3, b4] = b; + + // c0 ← a0b0 + 3(a1b4 + a2b3 + a3b2 + a4b1) + // c1 ← a0b1 + a1b0 + 3(a2b4 + a3b3 + a4b2) + // c2 ← a0b2 + a1b1 + a2b0 + 3(a3b4 + a4b3) + // c3 ← a0b3 + a1b2 + a2b1 + a3b0 + 3a4b4 + + let mut c0 = builder.mul_extension(a4, b1); + c0 = builder.mul_add_extension(a3, b2, c0); + c0 = builder.mul_add_extension(a2, b3, c0); + c0 = builder.mul_add_extension(a1, b4, c0); + c0 = builder.mul_const_extension(F::from_canonical_u64(3), c0); + c0 = builder.mul_add_extension(a0, b0, c0); + + let mut c1 = builder.mul_extension(a4, b2); + c1 = builder.mul_add_extension(a3, b3, c1); + c1 = builder.mul_add_extension(a2, b4, c1); + c1 = builder.mul_const_extension(F::from_canonical_u64(3), c1); + c1 = builder.mul_add_extension(a1, b0, c1); + c1 = builder.mul_add_extension(a0, b1, c1); + + let mut c2 = builder.mul_extension(a4, b3); + c2 = builder.mul_add_extension(a3, b4, c2); + c2 = builder.mul_const_extension(F::from_canonical_u64(3), c2); + c2 = builder.mul_add_extension(a2, b0, c2); + c2 = builder.mul_add_extension(a1, b1, c2); + c2 = builder.mul_add_extension(a0, b2, c2); + + let mut c3 = builder.mul_extension(a4, b4); + c3 = builder.mul_const_extension(F::from_canonical_u64(3), c3); + c3 = builder.mul_add_extension(a3, b0, c3); + c3 = builder.mul_add_extension(a2, b1, c3); + c3 = builder.mul_add_extension(a1, b2, c3); + c3 = builder.mul_add_extension(a0, b3, c3); + + let mut c4 = builder.mul_extension(a4, b0); + c4 = builder.mul_add_extension(a3, b1, c4); + c4 = builder.mul_add_extension(a2, b2, c4); + c4 = builder.mul_add_extension(a1, b3, c4); + c4 = builder.mul_add_extension(a0, b4, c4); + + [c0, c1, c2, c3, c4] +} + +fn gfp5_scalar_mul_limbwise_circuit_lifted, const D: usize>( + builder: &mut CircuitBuilder, + c: ExtensionTarget, + a: [ExtensionTarget; 5], +) -> [ExtensionTarget; 5] { + [ + builder.mul_extension(c, a[0]), + builder.mul_extension(c, a[1]), + builder.mul_extension(c, a[2]), + builder.mul_extension(c, a[3]), + builder.mul_extension(c, a[4]), + ] +} + +#[cfg(test)] +mod tests { + use super::*; + use anyhow::Result; + + use plonky2::{ + field::goldilocks_field::GoldilocksField, + gates::gate_testing::{test_eval_fns, test_low_degree}, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + + #[test] + fn low_degree() { + let gate = MulGFp5Gate::new_from_config(&CircuitConfig::standard_recursion_config()); + test_low_degree::(gate); + } + + #[test] + fn eval_fns() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let gate = MulGFp5Gate::new_from_config(&CircuitConfig::standard_recursion_config()); + test_eval_fns::(gate) + } +} diff --git a/ecgfp5/src/gates/mod.rs b/ecgfp5/src/gates/mod.rs new file mode 100644 index 0000000000..404f490ca2 --- /dev/null +++ b/ecgfp5/src/gates/mod.rs @@ -0,0 +1 @@ +pub(crate) mod gfp5_mul; diff --git a/ecgfp5/src/lib.rs b/ecgfp5/src/lib.rs new file mode 100644 index 0000000000..468ac06bbf --- /dev/null +++ b/ecgfp5/src/lib.rs @@ -0,0 +1,8 @@ +extern crate alloc; + +pub mod curve; +pub mod gadgets; +pub(crate) mod gates; + +#[cfg(test)] +mod macros; diff --git a/ecgfp5/src/macros.rs b/ecgfp5/src/macros.rs new file mode 100644 index 0000000000..4023eeb696 --- /dev/null +++ b/ecgfp5/src/macros.rs @@ -0,0 +1,97 @@ +// These were copied straight from plonky2/field +#[macro_export] +macro_rules! 
test_field_arithmetic { + ($field:ty) => { + mod field_arithmetic { + use num::bigint::BigUint; + use plonky2_field::types::{Field, Sample}; + use rand::Rng; + + #[test] + fn batch_inversion() { + for n in 0..20 { + let xs = + (1..=n as u64).map(|i| <$field>::from_canonical_u64(i)).collect::>(); + let invs = <$field>::batch_multiplicative_inverse(&xs); + assert_eq!(invs.len(), n); + for (x, inv) in xs.into_iter().zip(invs) { + assert_eq!(x * inv, <$field>::ONE); + } + } + } + + #[test] + fn primitive_root_order() { + let max_power = 8.min(<$field>::TWO_ADICITY); + for n_power in 0..max_power { + let root = <$field>::primitive_root_of_unity(n_power); + let order = <$field>::generator_order(root); + assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power); + } + } + + #[test] + fn negation() { + type F = $field; + + for x in [F::ZERO, F::ONE, F::TWO, F::NEG_ONE] { + assert_eq!(x + -x, F::ZERO); + } + } + + #[test] + fn exponentiation() { + type F = $field; + + assert_eq!(F::ZERO.exp_u64(0), ::ONE); + assert_eq!(F::ONE.exp_u64(0), ::ONE); + assert_eq!(F::TWO.exp_u64(0), ::ONE); + + assert_eq!(F::ZERO.exp_u64(1), ::ZERO); + assert_eq!(F::ONE.exp_u64(1), ::ONE); + assert_eq!(F::TWO.exp_u64(1), ::TWO); + + assert_eq!(F::ZERO.kth_root_u64(1), ::ZERO); + assert_eq!(F::ONE.kth_root_u64(1), ::ONE); + assert_eq!(F::TWO.kth_root_u64(1), ::TWO); + + for power in 1..10 { + if F::is_monomial_permutation_u64(power) { + let x = F::rand(); + assert_eq!(x.exp_u64(power).kth_root_u64(power), x); + } + } + } + + #[test] + fn exponentiation_large() { + type F = $field; + + let mut rng = rand::thread_rng(); + + let base = F::rand(); + let pow = BigUint::from(rng.gen::()); + let cycles = rng.gen::(); + let mul_group_order = F::order() - 1u32; + let big_pow = &pow + &mul_group_order * cycles; + let big_pow_wrong = &pow + &mul_group_order * cycles + 1u32; + + assert_eq!(base.exp_biguint(&pow), base.exp_biguint(&big_pow)); + assert_ne!(base.exp_biguint(&pow), base.exp_biguint(&big_pow_wrong)); + } + + #[test] + fn inverses() { + type F = $field; + + let x = F::rand(); + let x1 = x.inverse(); + let x2 = x1.inverse(); + let x3 = x2.inverse(); + + assert_eq!(x, x2); + assert_eq!(x1, x3); + } + } + }; +} diff --git a/u32/.github/workflows/continuous-integration.yml b/u32/.github/workflows/continuous-integration.yml new file mode 100644 index 0000000000..640074b82e --- /dev/null +++ b/u32/.github/workflows/continuous-integration.yml @@ -0,0 +1,95 @@ +name: Continuous Integration + +on: + push: + branches: [main] + pull_request: + branches: + - "**" + workflow_dispatch: + branches: + - "**" + +jobs: + test: + name: Test Suite + runs-on: ubuntu-latest + if: "! 
contains(toJSON(github.event.commits.*.message), '[skip-ci]')" + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - name: Install nightly toolchain + id: rustc-toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + + - name: rust-cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: rustc-test-${{ steps.rustc-toolchain.outputs.rustc_hash }}-cargo-${{ hashFiles('**/Cargo.toml') }} + + - name: Run cargo test + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace + env: + RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 + RUST_LOG: 1 + CARGO_INCREMENTAL: 1 + RUST_BACKTRACE: 1 + + lints: + name: Formatting and Clippy + runs-on: ubuntu-latest + if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')" + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - name: Install nightly toolchain + id: rustc-toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + components: rustfmt, clippy + + - name: rust-cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: rustc-lints-${{ steps.rustc-toolchain.outputs.rustc_hash }}-cargo-${{ hashFiles('**/Cargo.toml') }} + + - name: Run cargo fmt + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + env: + CARGO_INCREMENTAL: 1 + + - name: Run cargo clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --all-features --all-targets -- -D warnings -A incomplete-features + env: + # Seems necessary until https://github.com/rust-lang/rust/pull/115819 is merged. + CARGO_INCREMENTAL: 0 diff --git a/u32/.gitignore b/u32/.gitignore new file mode 100644 index 0000000000..ff355236d6 --- /dev/null +++ b/u32/.gitignore @@ -0,0 +1,11 @@ +# Cargo build +/target +Cargo.lock + +# Profile-guided optimization +/tmp +pgo-data.profdata + +# MacOS nuisances +.DS_Store + diff --git a/u32/Cargo.toml b/u32/Cargo.toml new file mode 100644 index 0000000000..c727cab4a9 --- /dev/null +++ b/u32/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "plonky2_u32" +description = "u32 gadget for Plonky2" +version = "0.2.0" +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +anyhow = { version = "1.0.40", default-features = false } +itertools = { version = "0.10.0", default-features = false } +num = { version = "0.4", default-features = false } +plonky2 = { path = "../plonky2" } + +[dev-dependencies] +rand = { version = "0.8.4", default-features = false, features = ["getrandom"] } diff --git a/u32/LICENSE-APACHE b/u32/LICENSE-APACHE new file mode 100644 index 0000000000..1e5006dc14 --- /dev/null +++ b/u32/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ diff --git a/u32/LICENSE-MIT b/u32/LICENSE-MIT new file mode 100644 index 0000000000..86d690b220 --- /dev/null +++ b/u32/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 The Plonky2 Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/u32/README.md b/u32/README.md new file mode 100644 index 0000000000..bb4e2d8a92 --- /dev/null +++ b/u32/README.md @@ -0,0 +1,13 @@ +## License + +Licensed under either of + +* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/u32/rustfmt.toml b/u32/rustfmt.toml new file mode 100644 index 0000000000..e651f760b1 --- /dev/null +++ b/u32/rustfmt.toml @@ -0,0 +1,7 @@ +# This project uses rustfmt to format source code. Run `cargo +nightly fmt [-- --check]. +# https://github.com/rust-lang/rustfmt/blob/master/Configurations.md + +# Break complex but short statements a bit less. 
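+# With "Max", rustfmt keeps such expressions on a single line whenever they fit within the configured width.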
+use_small_heuristics = "Max" + +imports_granularity = "Crate" \ No newline at end of file diff --git a/u32/src/gadgets/arithmetic_u32.rs b/u32/src/gadgets/arithmetic_u32.rs new file mode 100644 index 0000000000..93d913c585 --- /dev/null +++ b/u32/src/gadgets/arithmetic_u32.rs @@ -0,0 +1,326 @@ +use alloc::{ + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; +use plonky2::{ + plonk::circuit_data::CommonCircuitData, + util::serialization::{Buffer, IoResult, Read, Write}, +}; + +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::Target, + witness::{PartitionWitness, Witness}, + }, + plonk::circuit_builder::CircuitBuilder, +}; + +use crate::{ + gates::{ + add_many_u32::U32AddManyGate, arithmetic_u32::U32ArithmeticGate, + subtraction_u32::U32SubtractionGate, + }, + serialization::{ReadU32, WriteU32}, + witness::GeneratedValuesU32, +}; + +#[derive(Clone, Copy, Debug)] +pub struct U32Target(pub Target); + +pub trait CircuitBuilderU32, const D: usize> { + fn add_virtual_u32_target(&mut self) -> U32Target; + + fn add_virtual_u32_targets(&mut self, n: usize) -> Vec; + + /// Returns a U32Target for the value `c`, which is assumed to be at most 32 bits. + fn constant_u32(&mut self, c: u32) -> U32Target; + + fn zero_u32(&mut self) -> U32Target; + + fn one_u32(&mut self) -> U32Target; + + fn connect_u32(&mut self, x: U32Target, y: U32Target); + + fn assert_zero_u32(&mut self, x: U32Target); + + /// Checks for special cases where the value of + /// `x * y + z` + /// can be determined without adding a `U32ArithmeticGate`. + fn arithmetic_u32_special_cases( + &mut self, + x: U32Target, + y: U32Target, + z: U32Target, + ) -> Option<(U32Target, U32Target)>; + + // Returns x * y + z. + fn mul_add_u32(&mut self, x: U32Target, y: U32Target, z: U32Target) -> (U32Target, U32Target); + + fn add_u32(&mut self, a: U32Target, b: U32Target) -> (U32Target, U32Target); + + fn add_many_u32(&mut self, to_add: &[U32Target]) -> (U32Target, U32Target); + + fn add_u32s_with_carry( + &mut self, + to_add: &[U32Target], + carry: U32Target, + ) -> (U32Target, U32Target); + + fn mul_u32(&mut self, a: U32Target, b: U32Target) -> (U32Target, U32Target); + + // Returns x - y - borrow, as a pair (result, borrow), where borrow is 0 or 1 depending on whether borrowing from the next digit is required (iff y + borrow > x). + fn sub_u32(&mut self, x: U32Target, y: U32Target, borrow: U32Target) -> (U32Target, U32Target); +} + +impl, const D: usize> CircuitBuilderU32 + for CircuitBuilder +{ + fn add_virtual_u32_target(&mut self) -> U32Target { + U32Target(self.add_virtual_target()) + } + + fn add_virtual_u32_targets(&mut self, n: usize) -> Vec { + self.add_virtual_targets(n).into_iter().map(U32Target).collect() + } + + /// Returns a U32Target for the value `c`, which is assumed to be at most 32 bits. + fn constant_u32(&mut self, c: u32) -> U32Target { + U32Target(self.constant(F::from_canonical_u32(c))) + } + + fn zero_u32(&mut self) -> U32Target { + U32Target(self.zero()) + } + + fn one_u32(&mut self) -> U32Target { + U32Target(self.one()) + } + + fn connect_u32(&mut self, x: U32Target, y: U32Target) { + self.connect(x.0, y.0) + } + + fn assert_zero_u32(&mut self, x: U32Target) { + self.assert_zero(x.0) + } + + /// Checks for special cases where the value of + /// `x * y + z` + /// can be determined without adding a `U32ArithmeticGate`. 
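+ /// In particular, if `x`, `y` and `z` are all constants, the low and high 32-bit halves of
+ /// `x * y + z` are returned directly as constant targets and no gate is allocated.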
+ fn arithmetic_u32_special_cases( + &mut self, + x: U32Target, + y: U32Target, + z: U32Target, + ) -> Option<(U32Target, U32Target)> { + let x_const = self.target_as_constant(x.0); + let y_const = self.target_as_constant(y.0); + let z_const = self.target_as_constant(z.0); + + // If both terms are constant, return their (constant) sum. + let first_term_const = + if let (Some(xx), Some(yy)) = (x_const, y_const) { Some(xx * yy) } else { None }; + + if let (Some(a), Some(b)) = (first_term_const, z_const) { + let sum = (a + b).to_canonical_u64(); + let (low, high) = (sum as u32, (sum >> 32) as u32); + return Some((self.constant_u32(low), self.constant_u32(high))); + } + + None + } + + // Returns x * y + z. + fn mul_add_u32(&mut self, x: U32Target, y: U32Target, z: U32Target) -> (U32Target, U32Target) { + if let Some(result) = self.arithmetic_u32_special_cases(x, y, z) { + return result; + } + + let gate = U32ArithmeticGate::::new_from_config(&self.config); + let (row, copy) = self.find_slot(gate, &[], &[]); + + self.connect(Target::wire(row, gate.wire_ith_multiplicand_0(copy)), x.0); + self.connect(Target::wire(row, gate.wire_ith_multiplicand_1(copy)), y.0); + self.connect(Target::wire(row, gate.wire_ith_addend(copy)), z.0); + + let output_low = U32Target(Target::wire(row, gate.wire_ith_output_low_half(copy))); + let output_high = U32Target(Target::wire(row, gate.wire_ith_output_high_half(copy))); + + (output_low, output_high) + } + + fn add_u32(&mut self, a: U32Target, b: U32Target) -> (U32Target, U32Target) { + let one = self.one_u32(); + self.mul_add_u32(a, one, b) + } + + fn add_many_u32(&mut self, to_add: &[U32Target]) -> (U32Target, U32Target) { + match to_add.len() { + 0 => (self.zero_u32(), self.zero_u32()), + 1 => (to_add[0], self.zero_u32()), + 2 => self.add_u32(to_add[0], to_add[1]), + _ => { + let num_addends = to_add.len(); + let gate = U32AddManyGate::::new_from_config(&self.config, num_addends); + let (row, copy) = + self.find_slot(gate, &[F::from_canonical_usize(num_addends)], &[]); + + for j in 0..num_addends { + self.connect( + Target::wire(row, gate.wire_ith_op_jth_addend(copy, j)), + to_add[j].0, + ); + } + let zero = self.zero(); + self.connect(Target::wire(row, gate.wire_ith_carry(copy)), zero); + + let output_low = U32Target(Target::wire(row, gate.wire_ith_output_result(copy))); + let output_high = U32Target(Target::wire(row, gate.wire_ith_output_carry(copy))); + + (output_low, output_high) + } + } + } + + fn add_u32s_with_carry( + &mut self, + to_add: &[U32Target], + carry: U32Target, + ) -> (U32Target, U32Target) { + if to_add.len() == 1 { + return self.add_u32(to_add[0], carry); + } + + let num_addends = to_add.len(); + + let gate = U32AddManyGate::::new_from_config(&self.config, num_addends); + let (row, copy) = self.find_slot(gate, &[F::from_canonical_usize(num_addends)], &[]); + + for j in 0..num_addends { + self.connect(Target::wire(row, gate.wire_ith_op_jth_addend(copy, j)), to_add[j].0); + } + self.connect(Target::wire(row, gate.wire_ith_carry(copy)), carry.0); + + let output = U32Target(Target::wire(row, gate.wire_ith_output_result(copy))); + let output_carry = U32Target(Target::wire(row, gate.wire_ith_output_carry(copy))); + + (output, output_carry) + } + + fn mul_u32(&mut self, a: U32Target, b: U32Target) -> (U32Target, U32Target) { + let zero = self.zero_u32(); + self.mul_add_u32(a, b, zero) + } + + // Returns x - y - borrow, as a pair (result, borrow), where borrow is 0 or 1 depending on whether borrowing from the next digit is required (iff y + borrow > 
x). + fn sub_u32(&mut self, x: U32Target, y: U32Target, borrow: U32Target) -> (U32Target, U32Target) { + let gate = U32SubtractionGate::::new_from_config(&self.config); + let (row, copy) = self.find_slot(gate, &[], &[]); + + self.connect(Target::wire(row, gate.wire_ith_input_x(copy)), x.0); + self.connect(Target::wire(row, gate.wire_ith_input_y(copy)), y.0); + self.connect(Target::wire(row, gate.wire_ith_input_borrow(copy)), borrow.0); + + let output_result = U32Target(Target::wire(row, gate.wire_ith_output_result(copy))); + let output_borrow = U32Target(Target::wire(row, gate.wire_ith_output_borrow(copy))); + + (output_result, output_borrow) + } +} + +#[derive(Debug, Clone)] +struct SplitToU32Generator, const D: usize> { + x: Target, + low: U32Target, + high: U32Target, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for SplitToU32Generator +{ + fn id(&self) -> String { + "SplitToU32Generator".to_string() + } + + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_target(self.x)?; + dst.write_target_u32(self.low)?; + dst.write_target_u32(self.high) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult { + let x = src.read_target()?; + let low = src.read_target_u32()?; + let high = src.read_target_u32()?; + Ok(Self { x, low, high, _phantom: PhantomData }) + } + + fn dependencies(&self) -> Vec { + vec![self.x] + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let x = witness.get_target(self.x); + let x_u64 = x.to_canonical_u64(); + let low = x_u64 as u32; + let high = (x_u64 >> 32) as u32; + + out_buffer.set_u32_target(self.low, low); + out_buffer.set_u32_target(self.high, high); + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + iop::witness::PartialWitness, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + #[test] + pub fn test_add_many_u32s() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + const NUM_ADDENDS: usize = 15; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let mut rng = OsRng; + let mut to_add = Vec::new(); + let mut sum = 0u64; + for _ in 0..NUM_ADDENDS { + let x: u32 = rng.gen(); + sum += x as u64; + to_add.push(builder.constant_u32(x)); + } + let carry = builder.zero_u32(); + let (result_low, result_high) = builder.add_u32s_with_carry(&to_add, carry); + let expected_low = builder.constant_u32((sum % (1 << 32)) as u32); + let expected_high = builder.constant_u32((sum >> 32) as u32); + + builder.connect_u32(result_low, expected_low); + builder.connect_u32(result_high, expected_high); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } +} diff --git a/u32/src/gadgets/mod.rs b/u32/src/gadgets/mod.rs new file mode 100644 index 0000000000..622242eac1 --- /dev/null +++ b/u32/src/gadgets/mod.rs @@ -0,0 +1,3 @@ +pub mod arithmetic_u32; +pub mod multiple_comparison; +pub mod range_check; diff --git a/u32/src/gadgets/multiple_comparison.rs b/u32/src/gadgets/multiple_comparison.rs new file mode 100644 index 0000000000..ca81c35030 --- /dev/null +++ b/u32/src/gadgets/multiple_comparison.rs @@ -0,0 +1,123 @@ +use alloc::{vec, vec::Vec}; + +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + 
iop::target::{BoolTarget, Target}, + plonk::circuit_builder::CircuitBuilder, + util::ceil_div_usize, +}; + +use crate::{gadgets::arithmetic_u32::U32Target, gates::comparison::ComparisonGate}; + +/// Returns true if a is less than or equal to b, considered as base-`2^num_bits` limbs of a large value. +/// This range-checks its inputs. +pub fn list_le_circuit, const D: usize>( + builder: &mut CircuitBuilder, + a: Vec, + b: Vec, + num_bits: usize, +) -> BoolTarget { + assert_eq!(a.len(), b.len(), "Comparison must be between same number of inputs and outputs"); + let n = a.len(); + + let chunk_bits = 2; + let num_chunks = ceil_div_usize(num_bits, chunk_bits); + + let one = builder.one(); + let mut result = one; + for i in 0..n { + let a_le_b_gate = ComparisonGate::new(num_bits, num_chunks); + let a_le_b_row = builder.add_gate(a_le_b_gate.clone(), vec![]); + builder.connect(Target::wire(a_le_b_row, a_le_b_gate.wire_first_input()), a[i]); + builder.connect(Target::wire(a_le_b_row, a_le_b_gate.wire_second_input()), b[i]); + let a_le_b_result = Target::wire(a_le_b_row, a_le_b_gate.wire_result_bool()); + + let b_le_a_gate = ComparisonGate::new(num_bits, num_chunks); + let b_le_a_row = builder.add_gate(b_le_a_gate.clone(), vec![]); + builder.connect(Target::wire(b_le_a_row, b_le_a_gate.wire_first_input()), b[i]); + builder.connect(Target::wire(b_le_a_row, b_le_a_gate.wire_second_input()), a[i]); + let b_le_a_result = Target::wire(b_le_a_row, b_le_a_gate.wire_result_bool()); + + let these_limbs_equal = builder.mul(a_le_b_result, b_le_a_result); + let these_limbs_less_than = builder.sub(one, b_le_a_result); + result = builder.mul_add(these_limbs_equal, result, these_limbs_less_than); + } + + // `result` being boolean is an invariant, maintained because its new value is always + // `x * result + y`, where `x` and `y` are booleans that are not simultaneously true. + BoolTarget::new_unsafe(result) +} + +/// Helper function for comparing, specifically, lists of `U32Target`s. 
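+/// Each `U32Target` is one base-2^32 limb, least-significant limb first, so e.g.
+/// `list_le_u32_circuit(&mut builder, x_limbs, y_limbs)` (with hypothetical limb vectors
+/// `x_limbs`/`y_limbs`) yields a `BoolTarget` that is true iff x <= y.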
+pub fn list_le_u32_circuit, const D: usize>( + builder: &mut CircuitBuilder, + a: Vec, + b: Vec, +) -> BoolTarget { + let a_targets: Vec = a.iter().map(|&t| t.0).collect(); + let b_targets: Vec = b.iter().map(|&t| t.0).collect(); + + list_le_circuit(builder, a_targets, b_targets, 32) +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use num::BigUint; + use plonky2::{ + field::types::Field, + iop::witness::PartialWitness, + plonk::{ + circuit_data::CircuitConfig, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + fn test_list_le(size: usize, num_bits: usize) -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let mut rng = OsRng; + + let lst1: Vec = (0..size).map(|_| rng.gen_range(0..(1 << num_bits))).collect(); + let lst2: Vec = (0..size).map(|_| rng.gen_range(0..(1 << num_bits))).collect(); + + let a_biguint = BigUint::from_slice( + &lst1.iter().flat_map(|&x| [x as u32, (x >> 32) as u32]).collect::>(), + ); + let b_biguint = BigUint::from_slice( + &lst2.iter().flat_map(|&x| [x as u32, (x >> 32) as u32]).collect::>(), + ); + + let a = lst1.iter().map(|&x| builder.constant(F::from_canonical_u64(x))).collect(); + let b = lst2.iter().map(|&x| builder.constant(F::from_canonical_u64(x))).collect(); + + let result = list_le_circuit(&mut builder, a, b, num_bits); + + let expected_result = builder.constant_bool(a_biguint <= b_biguint); + builder.connect(result.target, expected_result.target); + + let data = builder.build::(); + let proof = data.prove(pw).unwrap(); + data.verify(proof) + } + + #[test] + fn test_multiple_comparison() -> Result<()> { + for size in [1, 3, 6] { + for num_bits in [20, 32, 40, 44] { + test_list_le(size, num_bits).unwrap(); + } + } + + Ok(()) + } +} diff --git a/u32/src/gadgets/range_check.rs b/u32/src/gadgets/range_check.rs new file mode 100644 index 0000000000..4033583598 --- /dev/null +++ b/u32/src/gadgets/range_check.rs @@ -0,0 +1,21 @@ +use alloc::{vec, vec::Vec}; + +use plonky2::{ + field::extension::Extendable, hash::hash_types::RichField, iop::target::Target, + plonk::circuit_builder::CircuitBuilder, +}; + +use crate::{gadgets::arithmetic_u32::U32Target, gates::range_check_u32::U32RangeCheckGate}; + +pub fn range_check_u32_circuit, const D: usize>( + builder: &mut CircuitBuilder, + vals: Vec, +) { + let num_input_limbs = vals.len(); + let gate = U32RangeCheckGate::::new(num_input_limbs); + let row = builder.add_gate(gate, vec![]); + + for i in 0..num_input_limbs { + builder.connect(Target::wire(row, gate.wire_ith_input_limb(i)), vals[i].0); + } +} diff --git a/u32/src/gates/add_many_u32.rs b/u32/src/gates/add_many_u32.rs new file mode 100644 index 0000000000..f5cafb1394 --- /dev/null +++ b/u32/src/gates/add_many_u32.rs @@ -0,0 +1,480 @@ +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; +use core::marker::PhantomData; +use plonky2::util::serialization::{Buffer, IoResult, Read, Write}; + +use itertools::unfold; +use plonky2::{ + field::{extension::Extendable, types::Field}, + gates::{gate::Gate, util::StridedConstraintConsumer}, + hash::hash_types::RichField, + iop::{ + ext_target::ExtensionTarget, + generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef}, + target::Target, + wire::Wire, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::{ + 
circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CommonCircuitData}, + vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}, + }, + util::ceil_div_usize, +}; + +const LOG2_MAX_NUM_ADDENDS: usize = 4; +const MAX_NUM_ADDENDS: usize = 16; + +/// A gate to perform addition on `num_addends` different 32-bit values, plus a small carry +#[derive(Copy, Clone, Debug)] +pub struct U32AddManyGate, const D: usize> { + pub num_addends: usize, + pub num_ops: usize, + _phantom: PhantomData, +} + +impl, const D: usize> U32AddManyGate { + pub fn new_from_config(config: &CircuitConfig, num_addends: usize) -> Self { + Self { num_addends, num_ops: Self::num_ops(num_addends, config), _phantom: PhantomData } + } + + pub(crate) fn num_ops(num_addends: usize, config: &CircuitConfig) -> usize { + debug_assert!(num_addends <= MAX_NUM_ADDENDS); + let wires_per_op = (num_addends + 3) + Self::num_limbs(); + let routed_wires_per_op = num_addends + 3; + (config.num_wires / wires_per_op).min(config.num_routed_wires / routed_wires_per_op) + } + + pub fn wire_ith_op_jth_addend(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_ops); + debug_assert!(j < self.num_addends); + (self.num_addends + 3) * i + j + } + pub fn wire_ith_carry(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + (self.num_addends + 3) * i + self.num_addends + } + + pub fn wire_ith_output_result(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + (self.num_addends + 3) * i + self.num_addends + 1 + } + pub fn wire_ith_output_carry(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + (self.num_addends + 3) * i + self.num_addends + 2 + } + + pub fn limb_bits() -> usize { + 2 + } + pub fn num_result_limbs() -> usize { + ceil_div_usize(32, Self::limb_bits()) + } + pub fn num_carry_limbs() -> usize { + ceil_div_usize(LOG2_MAX_NUM_ADDENDS, Self::limb_bits()) + } + pub fn num_limbs() -> usize { + Self::num_result_limbs() + Self::num_carry_limbs() + } + + pub fn wire_ith_output_jth_limb(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_ops); + debug_assert!(j < Self::num_limbs()); + (self.num_addends + 3) * self.num_ops + Self::num_limbs() * i + j + } +} + +impl, const D: usize> Gate for U32AddManyGate { + fn id(&self) -> String { + format!("{self:?}") + } + + fn export_circom_verification_code(&self) -> String { + todo!(); + } + fn export_solidity_verification_code(&self) -> String { + todo!(); + } + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_usize(self.num_addends)?; + dst.write_usize(self.num_ops) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult { + let num_addends = src.read_usize()?; + let num_ops = src.read_usize()?; + Ok(Self { num_addends, num_ops, _phantom: PhantomData }) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + for i in 0..self.num_ops { + let addends: Vec = (0..self.num_addends) + .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) + .collect(); + let carry = vars.local_wires[self.wire_ith_carry(i)]; + + let computed_output = addends.iter().fold(F::Extension::ZERO, |x, &y| x + y) + carry; + + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_carry = vars.local_wires[self.wire_ith_output_carry(i)]; + + let base = F::Extension::from_canonical_u64(1 << 32u64); + let combined_output = output_carry * base + output_result; + + 
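+ // Tie the declared outputs to the computed sum: sum(addends) + carry must equal
+ // output_carry * 2^32 + output_result. The limb constraints below range-check both halves.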
constraints.push(combined_output - computed_output); + + let mut combined_result_limbs = F::Extension::ZERO; + let mut combined_carry_limbs = F::Extension::ZERO; + let base = F::Extension::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = (0..max_limb) + .map(|x| this_limb - F::Extension::from_canonical_usize(x)) + .product(); + constraints.push(product); + + if j < Self::num_result_limbs() { + combined_result_limbs = base * combined_result_limbs + this_limb; + } else { + combined_carry_limbs = base * combined_carry_limbs + this_limb; + } + } + constraints.push(combined_result_limbs - output_result); + constraints.push(combined_carry_limbs - output_carry); + } + + constraints + } + + fn eval_unfiltered_base_one( + &self, + vars: EvaluationVarsBase, + mut yield_constr: StridedConstraintConsumer, + ) { + for i in 0..self.num_ops { + let addends: Vec = (0..self.num_addends) + .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) + .collect(); + let carry = vars.local_wires[self.wire_ith_carry(i)]; + + let computed_output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry; + + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_carry = vars.local_wires[self.wire_ith_output_carry(i)]; + + let base = F::from_canonical_u64(1 << 32u64); + let combined_output = output_carry * base + output_result; + + yield_constr.one(combined_output - computed_output); + + let mut combined_result_limbs = F::ZERO; + let mut combined_carry_limbs = F::ZERO; + let base = F::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = + (0..max_limb).map(|x| this_limb - F::from_canonical_usize(x)).product(); + yield_constr.one(product); + + if j < Self::num_result_limbs() { + combined_result_limbs = base * combined_result_limbs + this_limb; + } else { + combined_carry_limbs = base * combined_carry_limbs + this_limb; + } + } + yield_constr.one(combined_result_limbs - output_result); + yield_constr.one(combined_carry_limbs - output_carry); + } + } + + fn eval_unfiltered_circuit( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + for i in 0..self.num_ops { + let addends: Vec> = (0..self.num_addends) + .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) + .collect(); + let carry = vars.local_wires[self.wire_ith_carry(i)]; + + let mut computed_output = carry; + for addend in addends { + computed_output = builder.add_extension(computed_output, addend); + } + + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_carry = vars.local_wires[self.wire_ith_output_carry(i)]; + + let base: F::Extension = F::from_canonical_u64(1 << 32u64).into(); + let base_target = builder.constant_extension(base); + let combined_output = + builder.mul_add_extension(output_carry, base_target, output_result); + + constraints.push(builder.sub_extension(combined_output, computed_output)); + + let mut combined_result_limbs = builder.zero_extension(); + let mut combined_carry_limbs = builder.zero_extension(); + let base = builder + .constant_extension(F::Extension::from_canonical_u64(1u64 << Self::limb_bits())); + for j in (0..Self::num_limbs()).rev() 
{ + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + + let mut product = builder.one_extension(); + for x in 0..max_limb { + let x_target = + builder.constant_extension(F::Extension::from_canonical_usize(x)); + let diff = builder.sub_extension(this_limb, x_target); + product = builder.mul_extension(product, diff); + } + constraints.push(product); + + if j < Self::num_result_limbs() { + combined_result_limbs = + builder.mul_add_extension(base, combined_result_limbs, this_limb); + } else { + combined_carry_limbs = + builder.mul_add_extension(base, combined_carry_limbs, this_limb); + } + } + constraints.push(builder.sub_extension(combined_result_limbs, output_result)); + constraints.push(builder.sub_extension(combined_carry_limbs, output_carry)); + } + + constraints + } + + fn generators(&self, row: usize, _local_constants: &[F]) -> Vec> { + (0..self.num_ops) + .map(|i| { + WitnessGeneratorRef::new( + U32AddManyGenerator { gate: *self, row, i, _phantom: PhantomData }.adapter(), + ) + }) + .collect() + } + + fn num_wires(&self) -> usize { + (self.num_addends + 3) * self.num_ops + Self::num_limbs() * self.num_ops + } + + fn num_constants(&self) -> usize { + 0 + } + + fn degree(&self) -> usize { + 1 << Self::limb_bits() + } + + fn num_constraints(&self) -> usize { + self.num_ops * (3 + Self::num_limbs()) + } +} + +#[derive(Clone, Debug)] +struct U32AddManyGenerator, const D: usize> { + gate: U32AddManyGate, + row: usize, + i: usize, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for U32AddManyGenerator +{ + fn id(&self) -> String { + "U32AddManyGenerator".to_string() + } + + fn serialize(&self, dst: &mut Vec, common_data: &CommonCircuitData) -> IoResult<()> { + self.gate.serialize(dst, common_data)?; + dst.write_usize(self.row)?; + dst.write_usize(self.i) + } + + fn deserialize(src: &mut Buffer, common_data: &CommonCircuitData) -> IoResult { + let gate = U32AddManyGate::deserialize(src, common_data)?; + let row = src.read_usize()?; + let i = src.read_usize()?; + Ok(Self { gate, row, i, _phantom: PhantomData }) + } + + fn dependencies(&self) -> Vec { + let local_target = |column| Target::wire(self.row, column); + + (0..self.gate.num_addends) + .map(|j| local_target(self.gate.wire_ith_op_jth_addend(self.i, j))) + .chain([local_target(self.gate.wire_ith_carry(self.i))]) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let local_wire = |column| Wire { row: self.row, column }; + + let get_local_wire = |column| witness.get_wire(local_wire(column)); + + let addends: Vec<_> = (0..self.gate.num_addends) + .map(|j| get_local_wire(self.gate.wire_ith_op_jth_addend(self.i, j))) + .collect(); + let carry = get_local_wire(self.gate.wire_ith_carry(self.i)); + + let output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry; + let output_u64 = output.to_canonical_u64(); + + let output_carry_u64 = output_u64 >> 32; + let output_result_u64 = output_u64 & ((1 << 32) - 1); + + let output_carry = F::from_canonical_u64(output_carry_u64); + let output_result = F::from_canonical_u64(output_result_u64); + + let output_carry_wire = local_wire(self.gate.wire_ith_output_carry(self.i)); + let output_result_wire = local_wire(self.gate.wire_ith_output_result(self.i)); + + out_buffer.set_wire(output_carry_wire, output_carry); + out_buffer.set_wire(output_result_wire, output_result); + + let num_result_limbs = U32AddManyGate::::num_result_limbs(); + let num_carry_limbs = 
U32AddManyGate::::num_carry_limbs(); + let limb_base = 1 << U32AddManyGate::::limb_bits(); + + let split_to_limbs = |mut val, num| { + unfold((), move |_| { + let ret = val % limb_base; + val /= limb_base; + Some(ret) + }) + .take(num) + .map(F::from_canonical_u64) + }; + + let result_limbs = split_to_limbs(output_result_u64, num_result_limbs); + let carry_limbs = split_to_limbs(output_carry_u64, num_carry_limbs); + + for (j, limb) in result_limbs.chain(carry_limbs).enumerate() { + let wire = local_wire(self.gate.wire_ith_output_jth_limb(self.i, j)); + out_buffer.set_wire(wire, limb); + } + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{ + extension::quartic::QuarticExtension, goldilocks_field::GoldilocksField, types::Sample, + }, + gates::gate_testing::{test_eval_fns, test_low_degree}, + hash::hash_types::HashOut, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + #[test] + fn low_degree() { + test_low_degree::(U32AddManyGate:: { + num_addends: 4, + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn eval_fns() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(U32AddManyGate:: { + num_addends: 4, + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn test_gate_constraint() { + type F = GoldilocksField; + type FF = QuarticExtension; + const D: usize = 4; + const NUM_ADDENDS: usize = 10; + const NUM_U32_ADD_MANY_OPS: usize = 3; + + fn get_wires(addends: Vec>, carries: Vec) -> Vec { + let mut v0 = Vec::new(); + let mut v1 = Vec::new(); + + let num_result_limbs = U32AddManyGate::::num_result_limbs(); + let num_carry_limbs = U32AddManyGate::::num_carry_limbs(); + let limb_base = 1 << U32AddManyGate::::limb_bits(); + for op in 0..NUM_U32_ADD_MANY_OPS { + let adds = &addends[op]; + let ca = carries[op]; + + let output = adds.iter().sum::() + ca; + let output_result = output & ((1 << 32) - 1); + let output_carry = output >> 32; + + let split_to_limbs = |mut val, num| { + unfold((), move |_| { + let ret = val % limb_base; + val /= limb_base; + Some(ret) + }) + .take(num) + .map(F::from_canonical_u64) + }; + + let mut result_limbs: Vec<_> = + split_to_limbs(output_result, num_result_limbs).collect(); + let mut carry_limbs: Vec<_> = + split_to_limbs(output_carry, num_carry_limbs).collect(); + + for a in adds { + v0.push(F::from_canonical_u64(*a)); + } + v0.push(F::from_canonical_u64(ca)); + v0.push(F::from_canonical_u64(output_result)); + v0.push(F::from_canonical_u64(output_carry)); + v1.append(&mut result_limbs); + v1.append(&mut carry_limbs); + } + + v0.iter().chain(v1.iter()).map(|&x| x.into()).collect() + } + + let mut rng = OsRng; + let addends: Vec> = (0..NUM_U32_ADD_MANY_OPS) + .map(|_| (0..NUM_ADDENDS).map(|_| rng.gen::() as u64).collect()) + .collect(); + let carries: Vec<_> = (0..NUM_U32_ADD_MANY_OPS).map(|_| rng.gen::() as u64).collect(); + + let gate = U32AddManyGate:: { + num_addends: NUM_ADDENDS, + num_ops: NUM_U32_ADD_MANY_OPS, + _phantom: PhantomData, + }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(addends, carries), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." 
+ ); + } +} diff --git a/u32/src/gates/arithmetic_u32.rs b/u32/src/gates/arithmetic_u32.rs new file mode 100644 index 0000000000..a7300c9b91 --- /dev/null +++ b/u32/src/gates/arithmetic_u32.rs @@ -0,0 +1,582 @@ +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; +use plonky2::util::serialization::{Buffer, IoResult, Read, Write}; + +use itertools::unfold; +use plonky2::{ + field::{extension::Extendable, packed::PackedField, types::Field}, + gates::{gate::Gate, packed_util::PackedEvaluableBase, util::StridedConstraintConsumer}, + hash::hash_types::RichField, + iop::{ + ext_target::ExtensionTarget, + generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef}, + target::Target, + wire::Wire, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CommonCircuitData}, + vars::{ + EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch, + EvaluationVarsBasePacked, + }, + }, +}; + +/// A gate to perform a basic mul-add on 32-bit values (we assume they are range-checked beforehand). +#[derive(Copy, Clone, Debug)] +pub struct U32ArithmeticGate, const D: usize> { + pub num_ops: usize, + _phantom: PhantomData, +} + +impl, const D: usize> U32ArithmeticGate { + pub fn new_from_config(config: &CircuitConfig) -> Self { + Self { num_ops: Self::num_ops(config), _phantom: PhantomData } + } + + pub(crate) fn num_ops(config: &CircuitConfig) -> usize { + let wires_per_op = Self::routed_wires_per_op() + Self::num_limbs(); + (config.num_wires / wires_per_op).min(config.num_routed_wires / Self::routed_wires_per_op()) + } + + pub fn wire_ith_multiplicand_0(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + Self::routed_wires_per_op() * i + } + pub fn wire_ith_multiplicand_1(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + Self::routed_wires_per_op() * i + 1 + } + pub fn wire_ith_addend(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + Self::routed_wires_per_op() * i + 2 + } + + pub fn wire_ith_output_low_half(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + Self::routed_wires_per_op() * i + 3 + } + + pub fn wire_ith_output_high_half(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + Self::routed_wires_per_op() * i + 4 + } + + pub fn wire_ith_inverse(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + Self::routed_wires_per_op() * i + 5 + } + + pub fn limb_bits() -> usize { + 2 + } + pub fn num_limbs() -> usize { + 64 / Self::limb_bits() + } + pub fn routed_wires_per_op() -> usize { + 6 + } + pub fn wire_ith_output_jth_limb(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_ops); + debug_assert!(j < Self::num_limbs()); + Self::routed_wires_per_op() * self.num_ops + Self::num_limbs() * i + j + } +} + +impl, const D: usize> Gate for U32ArithmeticGate { + fn id(&self) -> String { + format!("{self:?}") + } + + fn export_circom_verification_code(&self) -> String { + todo!(); + } + fn export_solidity_verification_code(&self) -> String { + todo!(); + } + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_usize(self.num_ops) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult { + let num_ops = src.read_usize()?; + Ok(Self { num_ops, _phantom: PhantomData }) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = 
Vec::with_capacity(self.num_constraints()); + for i in 0..self.num_ops { + let multiplicand_0 = vars.local_wires[self.wire_ith_multiplicand_0(i)]; + let multiplicand_1 = vars.local_wires[self.wire_ith_multiplicand_1(i)]; + let addend = vars.local_wires[self.wire_ith_addend(i)]; + + let computed_output = multiplicand_0 * multiplicand_1 + addend; + + let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; + let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + let inverse = vars.local_wires[self.wire_ith_inverse(i)]; + + // Check canonicity of combined_output = output_high * 2^32 + output_low + let combined_output = { + let base = F::Extension::from_canonical_u64(1 << 32u64); + let one = F::Extension::ONE; + let u32_max = F::Extension::from_canonical_u32(u32::MAX); + + // This is zero if and only if the high limb is `u32::MAX`. + // u32::MAX - output_high + let diff = u32_max - output_high; + // If this is zero, the diff is invertible, so the high limb is not `u32::MAX`. + // inverse * diff - 1 + let hi_not_max = inverse * diff - one; + // If this is zero, either the high limb is not `u32::MAX`, or the low limb is zero. + // hi_not_max * limb_0_u32 + let hi_not_max_or_lo_zero = hi_not_max * output_low; + + constraints.push(hi_not_max_or_lo_zero); + + output_high * base + output_low + }; + + constraints.push(combined_output - computed_output); + + let mut combined_low_limbs = F::Extension::ZERO; + let mut combined_high_limbs = F::Extension::ZERO; + let midpoint = Self::num_limbs() / 2; + let base = F::Extension::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = (0..max_limb) + .map(|x| this_limb - F::Extension::from_canonical_usize(x)) + .product(); + constraints.push(product); + + if j < midpoint { + combined_low_limbs = base * combined_low_limbs + this_limb; + } else { + combined_high_limbs = base * combined_high_limbs + this_limb; + } + } + constraints.push(combined_low_limbs - output_low); + constraints.push(combined_high_limbs - output_high); + } + + constraints + } + + fn eval_unfiltered_base_one( + &self, + _vars: EvaluationVarsBase, + _yield_constr: StridedConstraintConsumer, + ) { + panic!("use eval_unfiltered_base_packed instead"); + } + + fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch) -> Vec { + self.eval_unfiltered_base_batch_packed(vars_base) + } + + fn eval_unfiltered_circuit( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + for i in 0..self.num_ops { + let multiplicand_0 = vars.local_wires[self.wire_ith_multiplicand_0(i)]; + let multiplicand_1 = vars.local_wires[self.wire_ith_multiplicand_1(i)]; + let addend = vars.local_wires[self.wire_ith_addend(i)]; + + let computed_output = builder.mul_add_extension(multiplicand_0, multiplicand_1, addend); + + let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; + let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + let inverse = vars.local_wires[self.wire_ith_inverse(i)]; + + // Check canonicity of combined_output = output_high * 2^32 + output_low + let combined_output = { + let base: F::Extension = F::from_canonical_u64(1 << 32u64).into(); + let base_target = builder.constant_extension(base); + let one = builder.one_extension(); + let u32_max = + 
builder.constant_extension(F::Extension::from_canonical_u32(u32::MAX)); + + // This is zero if and only if the high limb is `u32::MAX`. + let diff = builder.sub_extension(u32_max, output_high); + // If this is zero, the diff is invertible, so the high limb is not `u32::MAX`. + let hi_not_max = builder.mul_sub_extension(inverse, diff, one); + // If this is zero, either the high limb is not `u32::MAX`, or the low limb is zero. + let hi_not_max_or_lo_zero = builder.mul_extension(hi_not_max, output_low); + + constraints.push(hi_not_max_or_lo_zero); + + builder.mul_add_extension(output_high, base_target, output_low) + }; + + constraints.push(builder.sub_extension(combined_output, computed_output)); + + let mut combined_low_limbs = builder.zero_extension(); + let mut combined_high_limbs = builder.zero_extension(); + let midpoint = Self::num_limbs() / 2; + let base = builder + .constant_extension(F::Extension::from_canonical_u64(1u64 << Self::limb_bits())); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + + let mut product = builder.one_extension(); + for x in 0..max_limb { + let x_target = + builder.constant_extension(F::Extension::from_canonical_usize(x)); + let diff = builder.sub_extension(this_limb, x_target); + product = builder.mul_extension(product, diff); + } + constraints.push(product); + + if j < midpoint { + combined_low_limbs = + builder.mul_add_extension(base, combined_low_limbs, this_limb); + } else { + combined_high_limbs = + builder.mul_add_extension(base, combined_high_limbs, this_limb); + } + } + + constraints.push(builder.sub_extension(combined_low_limbs, output_low)); + constraints.push(builder.sub_extension(combined_high_limbs, output_high)); + } + + constraints + } + + fn generators(&self, row: usize, _local_constants: &[F]) -> Vec> { + (0..self.num_ops) + .map(|i| { + let g: WitnessGeneratorRef = WitnessGeneratorRef::new( + U32ArithmeticGenerator { gate: *self, row, i, _phantom: PhantomData }.adapter(), + ); + g + }) + .collect() + } + + fn num_wires(&self) -> usize { + self.num_ops * (Self::routed_wires_per_op() + Self::num_limbs()) + } + + fn num_constants(&self) -> usize { + 0 + } + + fn degree(&self) -> usize { + 1 << Self::limb_bits() + } + + fn num_constraints(&self) -> usize { + self.num_ops * (4 + Self::num_limbs()) + } +} + +impl, const D: usize> PackedEvaluableBase + for U32ArithmeticGate +{ + fn eval_unfiltered_base_packed>( + &self, + vars: EvaluationVarsBasePacked
<P>, + mut yield_constr: StridedConstraintConsumer<P>
, + ) { + for i in 0..self.num_ops { + let multiplicand_0 = vars.local_wires[self.wire_ith_multiplicand_0(i)]; + let multiplicand_1 = vars.local_wires[self.wire_ith_multiplicand_1(i)]; + let addend = vars.local_wires[self.wire_ith_addend(i)]; + + let computed_output = multiplicand_0 * multiplicand_1 + addend; + + let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; + let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + let inverse = vars.local_wires[self.wire_ith_inverse(i)]; + + let combined_output = { + let base = P::from(F::from_canonical_u64(1 << 32u64)); + let one = P::ONES; + let u32_max = P::from(F::from_canonical_u32(u32::MAX)); + + // This is zero if and only if the high limb is `u32::MAX`. + // u32::MAX - output_high + let diff = u32_max - output_high; + // If this is zero, the diff is invertible, so the high limb is not `u32::MAX`. + // inverse * diff - 1 + let hi_not_max = inverse * diff - one; + // If this is zero, either the high limb is not `u32::MAX`, or the low limb is zero. + // hi_not_max * limb_0_u32 + let hi_not_max_or_lo_zero = hi_not_max * output_low; + + yield_constr.one(hi_not_max_or_lo_zero); + + output_high * base + output_low + }; + + yield_constr.one(combined_output - computed_output); + + let mut combined_low_limbs = P::ZEROS; + let mut combined_high_limbs = P::ZEROS; + let midpoint = Self::num_limbs() / 2; + let base = F::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = + (0..max_limb).map(|x| this_limb - F::from_canonical_usize(x)).product(); + yield_constr.one(product); + + if j < midpoint { + combined_low_limbs = combined_low_limbs * base + this_limb; + } else { + combined_high_limbs = combined_high_limbs * base + this_limb; + } + } + yield_constr.one(combined_low_limbs - output_low); + yield_constr.one(combined_high_limbs - output_high); + } + } +} + +#[derive(Clone, Debug)] +struct U32ArithmeticGenerator, const D: usize> { + gate: U32ArithmeticGate, + row: usize, + i: usize, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for U32ArithmeticGenerator +{ + fn id(&self) -> String { + "U32ArithmeticGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + let local_target = |column| Target::wire(self.row, column); + + vec![ + local_target(self.gate.wire_ith_multiplicand_0(self.i)), + local_target(self.gate.wire_ith_multiplicand_1(self.i)), + local_target(self.gate.wire_ith_addend(self.i)), + ] + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let local_wire = |column| Wire { row: self.row, column }; + + let get_local_wire = |column| witness.get_wire(local_wire(column)); + + let multiplicand_0 = get_local_wire(self.gate.wire_ith_multiplicand_0(self.i)); + let multiplicand_1 = get_local_wire(self.gate.wire_ith_multiplicand_1(self.i)); + let addend = get_local_wire(self.gate.wire_ith_addend(self.i)); + + let output = multiplicand_0 * multiplicand_1 + addend; + let mut output_u64 = output.to_canonical_u64(); + + let output_high_u64 = output_u64 >> 32; + let output_low_u64 = output_u64 & ((1 << 32) - 1); + + let output_high = F::from_canonical_u64(output_high_u64); + let output_low = F::from_canonical_u64(output_low_u64); + + let output_high_wire = local_wire(self.gate.wire_ith_output_high_half(self.i)); + let output_low_wire = local_wire(self.gate.wire_ith_output_low_half(self.i)); + + 
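+ // Write the low/high 32-bit halves of multiplicand_0 * multiplicand_1 + addend, then the
+ // inverse witness used by the canonicity check (the inverse of u32::MAX - output_high when
+ // that difference is nonzero, zero otherwise).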
out_buffer.set_wire(output_high_wire, output_high); + out_buffer.set_wire(output_low_wire, output_low); + + let diff = u32::MAX as u64 - output_high_u64; + let inverse = if diff == 0 { F::ZERO } else { F::from_canonical_u64(diff).inverse() }; + let inverse_wire = local_wire(self.gate.wire_ith_inverse(self.i)); + out_buffer.set_wire(inverse_wire, inverse); + + let num_limbs = U32ArithmeticGate::::num_limbs(); + let limb_base = 1 << U32ArithmeticGate::::limb_bits(); + let output_limbs_u64 = unfold((), move |_| { + let ret = output_u64 % limb_base; + output_u64 /= limb_base; + Some(ret) + }) + .take(num_limbs); + let output_limbs_f = output_limbs_u64.map(F::from_canonical_u64); + + for (j, output_limb) in output_limbs_f.enumerate() { + let wire = local_wire(self.gate.wire_ith_output_jth_limb(self.i, j)); + out_buffer.set_wire(wire, output_limb); + } + } + + fn serialize(&self, dst: &mut Vec, common_data: &CommonCircuitData) -> IoResult<()> { + self.gate.serialize(dst, common_data)?; + dst.write_usize(self.row)?; + dst.write_usize(self.i) + } + + fn deserialize(src: &mut Buffer, common_data: &CommonCircuitData) -> IoResult { + let gate = U32ArithmeticGate::deserialize(src, common_data)?; + let row = src.read_usize()?; + let i = src.read_usize()?; + Ok(Self { gate, row, i, _phantom: PhantomData }) + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{goldilocks_field::GoldilocksField, types::Sample}, + gates::gate_testing::{test_eval_fns, test_low_degree}, + hash::hash_types::HashOut, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + #[test] + fn low_degree() { + test_low_degree::(U32ArithmeticGate:: { + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn eval_fns() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(U32ArithmeticGate:: { + num_ops: 3, + _phantom: PhantomData, + }) + } + + fn get_wires< + F: RichField + Extendable, + FF: From, + const D: usize, + const NUM_U32_ARITHMETIC_OPS: usize, + >( + multiplicands_0: Vec, + multiplicands_1: Vec, + addends: Vec, + ) -> Vec { + let mut v0 = Vec::new(); + let mut v1 = Vec::new(); + + let limb_bits = U32ArithmeticGate::::limb_bits(); + let num_limbs = U32ArithmeticGate::::num_limbs(); + let limb_base = 1 << limb_bits; + for c in 0..NUM_U32_ARITHMETIC_OPS { + let m0 = multiplicands_0[c]; + let m1 = multiplicands_1[c]; + let a = addends[c]; + + let mut output = m0 * m1 + a; + let output_low = output & ((1 << 32) - 1); + let output_high = output >> 32; + let diff = u32::MAX as u64 - output_high; + let inverse = if diff == 0 { F::ZERO } else { F::from_canonical_u64(diff).inverse() }; + + let mut output_limbs = Vec::with_capacity(num_limbs); + for _i in 0..num_limbs { + output_limbs.push(output % limb_base); + output /= limb_base; + } + let mut output_limbs_f: Vec<_> = + output_limbs.into_iter().map(F::from_canonical_u64).collect(); + + v0.push(F::from_canonical_u64(m0)); + v0.push(F::from_canonical_u64(m1)); + v0.push(F::from_noncanonical_u64(a)); + v0.push(F::from_canonical_u64(output_low)); + v0.push(F::from_canonical_u64(output_high)); + v0.push(inverse); + v1.append(&mut output_limbs_f); + } + + v0.iter().chain(v1.iter()).map(|&x| x.into()).collect() + } + + #[test] + fn test_gate_constraint() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type FF = >::FE; + const NUM_U32_ARITHMETIC_OPS: usize = 3; + + let mut rng = OsRng; + let 
multiplicands_0: Vec<_> = + (0..NUM_U32_ARITHMETIC_OPS).map(|_| rng.gen::() as u64).collect(); + let multiplicands_1: Vec<_> = + (0..NUM_U32_ARITHMETIC_OPS).map(|_| rng.gen::() as u64).collect(); + let addends: Vec<_> = + (0..NUM_U32_ARITHMETIC_OPS).map(|_| rng.gen::() as u64).collect(); + + let gate = + U32ArithmeticGate:: { num_ops: NUM_U32_ARITHMETIC_OPS, _phantom: PhantomData }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires::( + multiplicands_0, + multiplicands_1, + addends, + ), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." + ); + } + + #[test] + fn test_canonicity() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type FF = >::FE; + const NUM_U32_ARITHMETIC_OPS: usize = 3; + + let multiplicands_0 = vec![0; NUM_U32_ARITHMETIC_OPS]; + let multiplicands_1 = vec![0; NUM_U32_ARITHMETIC_OPS]; + // A non-canonical addend will produce a non-canonical output using + // get_wires. + let addends = vec![0xFFFFFFFF00000001; NUM_U32_ARITHMETIC_OPS]; + + let gate = + U32ArithmeticGate:: { num_ops: NUM_U32_ARITHMETIC_OPS, _phantom: PhantomData }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires::( + multiplicands_0, + multiplicands_1, + addends, + ), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + !gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Non-canonical output should not pass constraints." + ); + } +} diff --git a/u32/src/gates/comparison.rs b/u32/src/gates/comparison.rs new file mode 100644 index 0000000000..39937f83ab --- /dev/null +++ b/u32/src/gates/comparison.rs @@ -0,0 +1,702 @@ +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; +use plonky2::{ + plonk::circuit_data::CommonCircuitData, + util::serialization::{Buffer, IoResult, Read, Write}, +}; + +use plonky2::{ + field::{ + extension::Extendable, + packed::PackedField, + types::{Field, Field64}, + }, + gates::{gate::Gate, packed_util::PackedEvaluableBase, util::StridedConstraintConsumer}, + hash::hash_types::RichField, + iop::{ + ext_target::ExtensionTarget, + generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef}, + target::Target, + wire::Wire, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::{ + circuit_builder::CircuitBuilder, + plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit}, + vars::{ + EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch, + EvaluationVarsBasePacked, + }, + }, + util::{bits_u64, ceil_div_usize}, +}; + +/// A gate for checking that one value is less than or equal to another. 
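+/// Both inputs are split into `num_chunks` chunks of `chunk_bits()` bits each; the result is
+/// determined by the most significant chunk in which the two inputs differ.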
+#[derive(Clone, Debug)] +pub struct ComparisonGate, const D: usize> { + pub(crate) num_bits: usize, + pub(crate) num_chunks: usize, + _phantom: PhantomData, +} + +impl, const D: usize> ComparisonGate { + pub fn new(num_bits: usize, num_chunks: usize) -> Self { + debug_assert!(num_bits < bits_u64(F::ORDER)); + Self { num_bits, num_chunks, _phantom: PhantomData } + } + + pub fn chunk_bits(&self) -> usize { + ceil_div_usize(self.num_bits, self.num_chunks) + } + + pub fn wire_first_input(&self) -> usize { + 0 + } + + pub fn wire_second_input(&self) -> usize { + 1 + } + + pub fn wire_result_bool(&self) -> usize { + 2 + } + + pub fn wire_most_significant_diff(&self) -> usize { + 3 + } + + pub fn wire_first_chunk_val(&self, chunk: usize) -> usize { + debug_assert!(chunk < self.num_chunks); + 4 + chunk + } + + pub fn wire_second_chunk_val(&self, chunk: usize) -> usize { + debug_assert!(chunk < self.num_chunks); + 4 + self.num_chunks + chunk + } + + pub fn wire_equality_dummy(&self, chunk: usize) -> usize { + debug_assert!(chunk < self.num_chunks); + 4 + 2 * self.num_chunks + chunk + } + + pub fn wire_chunks_equal(&self, chunk: usize) -> usize { + debug_assert!(chunk < self.num_chunks); + 4 + 3 * self.num_chunks + chunk + } + + pub fn wire_intermediate_value(&self, chunk: usize) -> usize { + debug_assert!(chunk < self.num_chunks); + 4 + 4 * self.num_chunks + chunk + } + + /// The `bit_index`th bit of 2^n - 1 + most_significant_diff. + pub fn wire_most_significant_diff_bit(&self, bit_index: usize) -> usize { + 4 + 5 * self.num_chunks + bit_index + } +} + +impl, const D: usize> Gate for ComparisonGate { + fn id(&self) -> String { + format!("{self:?}") + } + + fn export_circom_verification_code(&self) -> String { + todo!(); + } + fn export_solidity_verification_code(&self) -> String { + todo!(); + } + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_usize(self.num_bits)?; + dst.write_usize(self.num_chunks)?; + Ok(()) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult { + let num_bits = src.read_usize()?; + let num_chunks = src.read_usize()?; + Ok(Self { num_bits, num_chunks, _phantom: PhantomData }) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let first_input = vars.local_wires[self.wire_first_input()]; + let second_input = vars.local_wires[self.wire_second_input()]; + + // Get chunks and assert that they match + let first_chunks: Vec = + (0..self.num_chunks).map(|i| vars.local_wires[self.wire_first_chunk_val(i)]).collect(); + let second_chunks: Vec = + (0..self.num_chunks).map(|i| vars.local_wires[self.wire_second_chunk_val(i)]).collect(); + + let first_chunks_combined = reduce_with_powers( + &first_chunks, + F::Extension::from_canonical_usize(1 << self.chunk_bits()), + ); + let second_chunks_combined = reduce_with_powers( + &second_chunks, + F::Extension::from_canonical_usize(1 << self.chunk_bits()), + ); + + constraints.push(first_chunks_combined - first_input); + constraints.push(second_chunks_combined - second_input); + + let chunk_size = 1 << self.chunk_bits(); + + let mut most_significant_diff_so_far = F::Extension::ZERO; + + for i in 0..self.num_chunks { + // Range-check the chunks to be less than `chunk_size`. 
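+ // A chunk `c` is in range iff the degree-`chunk_size` product (c - 0)(c - 1)...(c - (chunk_size - 1)) vanishes.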
+ let first_product: F::Extension = (0..chunk_size) + .map(|x| first_chunks[i] - F::Extension::from_canonical_usize(x)) + .product(); + let second_product: F::Extension = (0..chunk_size) + .map(|x| second_chunks[i] - F::Extension::from_canonical_usize(x)) + .product(); + constraints.push(first_product); + constraints.push(second_product); + + let difference = second_chunks[i] - first_chunks[i]; + let equality_dummy = vars.local_wires[self.wire_equality_dummy(i)]; + let chunks_equal = vars.local_wires[self.wire_chunks_equal(i)]; + + // Two constraints to assert that `chunks_equal` is valid. + constraints.push(difference * equality_dummy - (F::Extension::ONE - chunks_equal)); + constraints.push(chunks_equal * difference); + + // Update `most_significant_diff_so_far`. + let intermediate_value = vars.local_wires[self.wire_intermediate_value(i)]; + constraints.push(intermediate_value - chunks_equal * most_significant_diff_so_far); + most_significant_diff_so_far = + intermediate_value + (F::Extension::ONE - chunks_equal) * difference; + } + + let most_significant_diff = vars.local_wires[self.wire_most_significant_diff()]; + constraints.push(most_significant_diff - most_significant_diff_so_far); + + let most_significant_diff_bits: Vec = (0..self.chunk_bits() + 1) + .map(|i| vars.local_wires[self.wire_most_significant_diff_bit(i)]) + .collect(); + + // Range-check the bits. + for &bit in &most_significant_diff_bits { + constraints.push(bit * (F::Extension::ONE - bit)); + } + + let bits_combined = reduce_with_powers(&most_significant_diff_bits, F::Extension::TWO); + let two_n = F::Extension::from_canonical_u64(1 << self.chunk_bits()); + constraints.push((two_n + most_significant_diff) - bits_combined); + + // Iff first <= second, the top (n + 1st) bit of (2^n + most_significant_diff) will be 1. + let result_bool = vars.local_wires[self.wire_result_bool()]; + constraints.push(result_bool - most_significant_diff_bits[self.chunk_bits()]); + + constraints + } + + fn eval_unfiltered_base_one( + &self, + _vars: EvaluationVarsBase, + _yield_constr: StridedConstraintConsumer, + ) { + panic!("use eval_unfiltered_base_packed instead"); + } + + fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch) -> Vec { + self.eval_unfiltered_base_batch_packed(vars_base) + } + + fn eval_unfiltered_circuit( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let first_input = vars.local_wires[self.wire_first_input()]; + let second_input = vars.local_wires[self.wire_second_input()]; + + // Get chunks and assert that they match + let first_chunks: Vec> = + (0..self.num_chunks).map(|i| vars.local_wires[self.wire_first_chunk_val(i)]).collect(); + let second_chunks: Vec> = + (0..self.num_chunks).map(|i| vars.local_wires[self.wire_second_chunk_val(i)]).collect(); + + let chunk_base = builder.constant(F::from_canonical_usize(1 << self.chunk_bits())); + let first_chunks_combined = + reduce_with_powers_ext_circuit(builder, &first_chunks, chunk_base); + let second_chunks_combined = + reduce_with_powers_ext_circuit(builder, &second_chunks, chunk_base); + + constraints.push(builder.sub_extension(first_chunks_combined, first_input)); + constraints.push(builder.sub_extension(second_chunks_combined, second_input)); + + let chunk_size = 1 << self.chunk_bits(); + + let mut most_significant_diff_so_far = builder.zero_extension(); + + let one = builder.one_extension(); + // Find the chosen chunk. 
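+ // Chunks are scanned from least to most significant; after the loop,
+ // `most_significant_diff_so_far` holds `second - first` at the most significant chunk where
+ // the inputs differ (or zero if they are equal).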
+ for i in 0..self.num_chunks { + // Range-check the chunks to be less than `chunk_size`. + let mut first_product = one; + let mut second_product = one; + for x in 0..chunk_size { + let x_f = builder.constant_extension(F::Extension::from_canonical_usize(x)); + let first_diff = builder.sub_extension(first_chunks[i], x_f); + let second_diff = builder.sub_extension(second_chunks[i], x_f); + first_product = builder.mul_extension(first_product, first_diff); + second_product = builder.mul_extension(second_product, second_diff); + } + constraints.push(first_product); + constraints.push(second_product); + + let difference = builder.sub_extension(second_chunks[i], first_chunks[i]); + let equality_dummy = vars.local_wires[self.wire_equality_dummy(i)]; + let chunks_equal = vars.local_wires[self.wire_chunks_equal(i)]; + + // Two constraints to assert that `chunks_equal` is valid. + let diff_times_equal = builder.mul_extension(difference, equality_dummy); + let not_equal = builder.sub_extension(one, chunks_equal); + constraints.push(builder.sub_extension(diff_times_equal, not_equal)); + constraints.push(builder.mul_extension(chunks_equal, difference)); + + // Update `most_significant_diff_so_far`. + let intermediate_value = vars.local_wires[self.wire_intermediate_value(i)]; + let old_diff = builder.mul_extension(chunks_equal, most_significant_diff_so_far); + constraints.push(builder.sub_extension(intermediate_value, old_diff)); + + let not_equal = builder.sub_extension(one, chunks_equal); + let new_diff = builder.mul_extension(not_equal, difference); + most_significant_diff_so_far = builder.add_extension(intermediate_value, new_diff); + } + + let most_significant_diff = vars.local_wires[self.wire_most_significant_diff()]; + constraints + .push(builder.sub_extension(most_significant_diff, most_significant_diff_so_far)); + + let most_significant_diff_bits: Vec> = (0..self.chunk_bits() + 1) + .map(|i| vars.local_wires[self.wire_most_significant_diff_bit(i)]) + .collect(); + + // Range-check the bits. + for &this_bit in &most_significant_diff_bits { + let inverse = builder.sub_extension(one, this_bit); + constraints.push(builder.mul_extension(this_bit, inverse)); + } + + let two = builder.two(); + let bits_combined = + reduce_with_powers_ext_circuit(builder, &most_significant_diff_bits, two); + let two_n = + builder.constant_extension(F::Extension::from_canonical_u64(1 << self.chunk_bits())); + let sum = builder.add_extension(two_n, most_significant_diff); + constraints.push(builder.sub_extension(sum, bits_combined)); + + // Iff first <= second, the top (n + 1st) bit of (2^n + most_significant_diff) will be 1. + let result_bool = vars.local_wires[self.wire_result_bool()]; + constraints.push( + builder.sub_extension(result_bool, most_significant_diff_bits[self.chunk_bits()]), + ); + + constraints + } + + fn generators(&self, row: usize, _local_constants: &[F]) -> Vec> { + let gen = ComparisonGenerator:: { row, gate: self.clone() }; + vec![WitnessGeneratorRef::new(gen.adapter())] + } + + fn num_wires(&self) -> usize { + 4 + 5 * self.num_chunks + (self.chunk_bits() + 1) + } + + fn num_constants(&self) -> usize { + 0 + } + + fn degree(&self) -> usize { + 1 << self.chunk_bits() + } + + fn num_constraints(&self) -> usize { + 6 + 5 * self.num_chunks + self.chunk_bits() + } +} + +impl, const D: usize> PackedEvaluableBase + for ComparisonGate +{ + fn eval_unfiltered_base_packed>( + &self, + vars: EvaluationVarsBasePacked

<P>, + mut yield_constr: StridedConstraintConsumer<P>
, + ) { + let first_input = vars.local_wires[self.wire_first_input()]; + let second_input = vars.local_wires[self.wire_second_input()]; + + // Get chunks and assert that they match + let first_chunks: Vec<_> = + (0..self.num_chunks).map(|i| vars.local_wires[self.wire_first_chunk_val(i)]).collect(); + let second_chunks: Vec<_> = + (0..self.num_chunks).map(|i| vars.local_wires[self.wire_second_chunk_val(i)]).collect(); + + let first_chunks_combined = + reduce_with_powers(&first_chunks, F::from_canonical_usize(1 << self.chunk_bits())); + let second_chunks_combined = + reduce_with_powers(&second_chunks, F::from_canonical_usize(1 << self.chunk_bits())); + + yield_constr.one(first_chunks_combined - first_input); + yield_constr.one(second_chunks_combined - second_input); + + let chunk_size = 1 << self.chunk_bits(); + + let mut most_significant_diff_so_far = P::ZEROS; + + for i in 0..self.num_chunks { + // Range-check the chunks to be less than `chunk_size`. + let first_product: P = + (0..chunk_size).map(|x| first_chunks[i] - F::from_canonical_usize(x)).product(); + let second_product: P = + (0..chunk_size).map(|x| second_chunks[i] - F::from_canonical_usize(x)).product(); + yield_constr.one(first_product); + yield_constr.one(second_product); + + let difference = second_chunks[i] - first_chunks[i]; + let equality_dummy = vars.local_wires[self.wire_equality_dummy(i)]; + let chunks_equal = vars.local_wires[self.wire_chunks_equal(i)]; + + // Two constraints to assert that `chunks_equal` is valid. + yield_constr.one(difference * equality_dummy - (P::ONES - chunks_equal)); + yield_constr.one(chunks_equal * difference); + + // Update `most_significant_diff_so_far`. + let intermediate_value = vars.local_wires[self.wire_intermediate_value(i)]; + yield_constr.one(intermediate_value - chunks_equal * most_significant_diff_so_far); + most_significant_diff_so_far = + intermediate_value + (P::ONES - chunks_equal) * difference; + } + + let most_significant_diff = vars.local_wires[self.wire_most_significant_diff()]; + yield_constr.one(most_significant_diff - most_significant_diff_so_far); + + let most_significant_diff_bits: Vec<_> = (0..self.chunk_bits() + 1) + .map(|i| vars.local_wires[self.wire_most_significant_diff_bit(i)]) + .collect(); + + // Range-check the bits. + for &bit in &most_significant_diff_bits { + yield_constr.one(bit * (P::ONES - bit)); + } + + let bits_combined = reduce_with_powers(&most_significant_diff_bits, F::TWO); + let two_n = F::from_canonical_u64(1 << self.chunk_bits()); + yield_constr.one((most_significant_diff + two_n) - bits_combined); + + // Iff first <= second, the top (n + 1st) bit of (2^n - 1 + most_significant_diff) will be 1. 
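+ // result_bool is thus constrained to equal the top bit of the decomposition computed above.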
+ let result_bool = vars.local_wires[self.wire_result_bool()]; + yield_constr.one(result_bool - most_significant_diff_bits[self.chunk_bits()]); + } +} + +#[derive(Debug, Clone)] +struct ComparisonGenerator, const D: usize> { + row: usize, + gate: ComparisonGate, +} + +impl, const D: usize> SimpleGenerator + for ComparisonGenerator +{ + fn id(&self) -> String { + "ComparisonGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + let local_target = |column| Target::wire(self.row, column); + + vec![ + local_target(self.gate.wire_first_input()), + local_target(self.gate.wire_second_input()), + ] + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let local_wire = |column| Wire { row: self.row, column }; + + let get_local_wire = |column| witness.get_wire(local_wire(column)); + + let first_input = get_local_wire(self.gate.wire_first_input()); + let second_input = get_local_wire(self.gate.wire_second_input()); + + let first_input_u64 = first_input.to_canonical_u64(); + let second_input_u64 = second_input.to_canonical_u64(); + + let result = F::from_canonical_usize((first_input_u64 <= second_input_u64) as usize); + + let chunk_size = 1 << self.gate.chunk_bits(); + let first_input_chunks: Vec = (0..self.gate.num_chunks) + .scan(first_input_u64, |acc, _| { + let tmp = *acc % chunk_size; + *acc /= chunk_size; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + let second_input_chunks: Vec = (0..self.gate.num_chunks) + .scan(second_input_u64, |acc, _| { + let tmp = *acc % chunk_size; + *acc /= chunk_size; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + + let chunks_equal: Vec = (0..self.gate.num_chunks) + .map(|i| F::from_bool(first_input_chunks[i] == second_input_chunks[i])) + .collect(); + let equality_dummies: Vec = first_input_chunks + .iter() + .zip(second_input_chunks.iter()) + .map(|(&f, &s)| if f == s { F::ONE } else { F::ONE / (s - f) }) + .collect(); + + let mut most_significant_diff_so_far = F::ZERO; + let mut intermediate_values = Vec::new(); + for i in 0..self.gate.num_chunks { + if first_input_chunks[i] != second_input_chunks[i] { + most_significant_diff_so_far = second_input_chunks[i] - first_input_chunks[i]; + intermediate_values.push(F::ZERO); + } else { + intermediate_values.push(most_significant_diff_so_far); + } + } + let most_significant_diff = most_significant_diff_so_far; + + let two_n = F::from_canonical_usize(1 << self.gate.chunk_bits()); + let two_n_plus_msd = (two_n + most_significant_diff).to_canonical_u64(); + + let msd_bits_u64: Vec = (0..self.gate.chunk_bits() + 1) + .scan(two_n_plus_msd, |acc, _| { + let tmp = *acc % 2; + *acc /= 2; + Some(tmp) + }) + .collect(); + let msd_bits: Vec = msd_bits_u64.iter().map(|x| F::from_canonical_u64(*x)).collect(); + + out_buffer.set_wire(local_wire(self.gate.wire_result_bool()), result); + out_buffer + .set_wire(local_wire(self.gate.wire_most_significant_diff()), most_significant_diff); + for i in 0..self.gate.num_chunks { + out_buffer + .set_wire(local_wire(self.gate.wire_first_chunk_val(i)), first_input_chunks[i]); + out_buffer + .set_wire(local_wire(self.gate.wire_second_chunk_val(i)), second_input_chunks[i]); + out_buffer.set_wire(local_wire(self.gate.wire_equality_dummy(i)), equality_dummies[i]); + out_buffer.set_wire(local_wire(self.gate.wire_chunks_equal(i)), chunks_equal[i]); + out_buffer + .set_wire(local_wire(self.gate.wire_intermediate_value(i)), intermediate_values[i]); + } + for i in 0..self.gate.chunk_bits() + 1 { + out_buffer + 
.set_wire(local_wire(self.gate.wire_most_significant_diff_bit(i)), msd_bits[i]); + } + } + + fn serialize(&self, dst: &mut Vec, common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_usize(self.row)?; + self.gate.serialize(dst, common_data) + } + + fn deserialize(src: &mut Buffer, common_data: &CommonCircuitData) -> IoResult { + let row = src.read_usize()?; + let gate = ComparisonGate::deserialize(src, common_data)?; + Ok(Self { row, gate }) + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{ + goldilocks_field::GoldilocksField, + types::{PrimeField64, Sample}, + }, + gates::gate_testing::{test_eval_fns, test_low_degree}, + hash::hash_types::HashOut, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + #[test] + fn wire_indices() { + type CG = ComparisonGate; + let num_bits = 40; + let num_chunks = 5; + + let gate = CG { num_bits, num_chunks, _phantom: PhantomData }; + + assert_eq!(gate.wire_first_input(), 0); + assert_eq!(gate.wire_second_input(), 1); + assert_eq!(gate.wire_result_bool(), 2); + assert_eq!(gate.wire_most_significant_diff(), 3); + assert_eq!(gate.wire_first_chunk_val(0), 4); + assert_eq!(gate.wire_first_chunk_val(4), 8); + assert_eq!(gate.wire_second_chunk_val(0), 9); + assert_eq!(gate.wire_second_chunk_val(4), 13); + assert_eq!(gate.wire_equality_dummy(0), 14); + assert_eq!(gate.wire_equality_dummy(4), 18); + assert_eq!(gate.wire_chunks_equal(0), 19); + assert_eq!(gate.wire_chunks_equal(4), 23); + assert_eq!(gate.wire_intermediate_value(0), 24); + assert_eq!(gate.wire_intermediate_value(4), 28); + assert_eq!(gate.wire_most_significant_diff_bit(0), 29); + assert_eq!(gate.wire_most_significant_diff_bit(8), 37); + } + + #[test] + fn low_degree() { + let num_bits = 40; + let num_chunks = 5; + + test_low_degree::(ComparisonGate::<_, 4>::new(num_bits, num_chunks)) + } + + #[test] + fn eval_fns() -> Result<()> { + let num_bits = 40; + let num_chunks = 5; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + test_eval_fns::(ComparisonGate::<_, 2>::new(num_bits, num_chunks)) + } + + #[test] + fn test_gate_constraint() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type FF = >::FE; + + let num_bits = 40; + let num_chunks = 5; + let chunk_bits = num_bits / num_chunks; + + // Returns the local wires for a comparison gate given the two inputs. 
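+ // The push/append order below must mirror the wire_* index helpers: the two inputs, result_bool, most_significant_diff, then the per-chunk columns, then the most-significant-diff bits.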
+ let get_wires = |first_input: F, second_input: F| -> Vec { + let mut v = Vec::new(); + + let first_input_u64 = first_input.to_canonical_u64(); + let second_input_u64 = second_input.to_canonical_u64(); + + let result_bool = F::from_bool(first_input_u64 <= second_input_u64); + + let chunk_size = 1 << chunk_bits; + let mut first_input_chunks: Vec = (0..num_chunks) + .scan(first_input_u64, |acc, _| { + let tmp = *acc % chunk_size; + *acc /= chunk_size; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + let mut second_input_chunks: Vec = (0..num_chunks) + .scan(second_input_u64, |acc, _| { + let tmp = *acc % chunk_size; + *acc /= chunk_size; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + + let mut chunks_equal: Vec = (0..num_chunks) + .map(|i| F::from_bool(first_input_chunks[i] == second_input_chunks[i])) + .collect(); + let mut equality_dummies: Vec = first_input_chunks + .iter() + .zip(second_input_chunks.iter()) + .map(|(&f, &s)| if f == s { F::ONE } else { F::ONE / (s - f) }) + .collect(); + + let mut most_significant_diff_so_far = F::ZERO; + let mut intermediate_values = Vec::new(); + for i in 0..num_chunks { + if first_input_chunks[i] != second_input_chunks[i] { + most_significant_diff_so_far = second_input_chunks[i] - first_input_chunks[i]; + intermediate_values.push(F::ZERO); + } else { + intermediate_values.push(most_significant_diff_so_far); + } + } + let most_significant_diff = most_significant_diff_so_far; + + let two_n_plus_msd = + (1 << chunk_bits) as u64 + most_significant_diff.to_canonical_u64(); + let mut msd_bits: Vec = (0..chunk_bits + 1) + .scan(two_n_plus_msd, |acc, _| { + let tmp = *acc % 2; + *acc /= 2; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + + v.push(first_input); + v.push(second_input); + v.push(result_bool); + v.push(most_significant_diff); + v.append(&mut first_input_chunks); + v.append(&mut second_input_chunks); + v.append(&mut equality_dummies); + v.append(&mut chunks_equal); + v.append(&mut intermediate_values); + v.append(&mut msd_bits); + + v.iter().map(|&x| x.into()).collect() + }; + + let mut rng = OsRng; + let max: u64 = 1 << (num_bits - 1); + let first_input_u64 = rng.gen_range(0..max); + let second_input_u64 = { + let mut val = rng.gen_range(0..max); + while val < first_input_u64 { + val = rng.gen_range(0..max); + } + val + }; + + let first_input = F::from_canonical_u64(first_input_u64); + let second_input = F::from_canonical_u64(second_input_u64); + + let less_than_gate = ComparisonGate:: { num_bits, num_chunks, _phantom: PhantomData }; + let less_than_vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(first_input, second_input), + public_inputs_hash: &HashOut::rand(), + }; + assert!( + less_than_gate.eval_unfiltered(less_than_vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." + ); + + let equal_gate = ComparisonGate:: { num_bits, num_chunks, _phantom: PhantomData }; + let equal_vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(first_input, first_input), + public_inputs_hash: &HashOut::rand(), + }; + assert!( + equal_gate.eval_unfiltered(equal_vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." 
+ ); + } +} diff --git a/u32/src/gates/mod.rs b/u32/src/gates/mod.rs new file mode 100644 index 0000000000..1880b16358 --- /dev/null +++ b/u32/src/gates/mod.rs @@ -0,0 +1,5 @@ +pub mod add_many_u32; +pub mod arithmetic_u32; +pub mod comparison; +pub mod range_check_u32; +pub mod subtraction_u32; diff --git a/u32/src/gates/range_check_u32.rs b/u32/src/gates/range_check_u32.rs new file mode 100644 index 0000000000..a1e4c376ca --- /dev/null +++ b/u32/src/gates/range_check_u32.rs @@ -0,0 +1,341 @@ +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; +use plonky2::{ + plonk::circuit_data::CommonCircuitData, + util::serialization::{Buffer, IoResult, Read, Write}, +}; + +use plonky2::{ + field::{extension::Extendable, types::Field}, + gates::{gate::Gate, util::StridedConstraintConsumer}, + hash::hash_types::RichField, + iop::{ + ext_target::ExtensionTarget, + generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef}, + target::Target, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::{ + circuit_builder::CircuitBuilder, + plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit}, + vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}, + }, + util::ceil_div_usize, +}; + +/// A gate which can decompose a number into base B little-endian limbs. +#[derive(Copy, Clone, Debug)] +pub struct U32RangeCheckGate, const D: usize> { + pub num_input_limbs: usize, + _phantom: PhantomData, +} + +impl, const D: usize> U32RangeCheckGate { + pub fn new(num_input_limbs: usize) -> Self { + Self { num_input_limbs, _phantom: PhantomData } + } + + pub const AUX_LIMB_BITS: usize = 2; + pub const BASE: usize = 1 << Self::AUX_LIMB_BITS; + + fn aux_limbs_per_input_limb(&self) -> usize { + ceil_div_usize(32, Self::AUX_LIMB_BITS) + } + pub fn wire_ith_input_limb(&self, i: usize) -> usize { + debug_assert!(i < self.num_input_limbs); + i + } + pub fn wire_ith_input_limb_jth_aux_limb(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_input_limbs); + debug_assert!(j < self.aux_limbs_per_input_limb()); + self.num_input_limbs + self.aux_limbs_per_input_limb() * i + j + } +} + +impl, const D: usize> Gate for U32RangeCheckGate { + fn id(&self) -> String { + format!("{self:?}") + } + + fn export_circom_verification_code(&self) -> String { + todo!(); + } + fn export_solidity_verification_code(&self) -> String { + todo!(); + } + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_usize(self.num_input_limbs) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult { + let num_input_limbs = src.read_usize()?; + Ok(Self { num_input_limbs, _phantom: PhantomData }) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let base = F::Extension::from_canonical_usize(Self::BASE); + for i in 0..self.num_input_limbs { + let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()) + .map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]) + .collect(); + let computed_sum = reduce_with_powers(&aux_limbs, base); + + constraints.push(computed_sum - input_limb); + for aux_limb in aux_limbs { + constraints.push( + (0..Self::BASE) + .map(|i| aux_limb - F::Extension::from_canonical_usize(i)) + .product(), + ); + } + } + + constraints + } + + fn eval_unfiltered_base_one( + &self, + vars: 
EvaluationVarsBase, + mut yield_constr: StridedConstraintConsumer, + ) { + let base = F::from_canonical_usize(Self::BASE); + for i in 0..self.num_input_limbs { + let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()) + .map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]) + .collect(); + let computed_sum = reduce_with_powers(&aux_limbs, base); + + yield_constr.one(computed_sum - input_limb); + for aux_limb in aux_limbs { + yield_constr + .one((0..Self::BASE).map(|i| aux_limb - F::from_canonical_usize(i)).product()); + } + } + } + + fn eval_unfiltered_circuit( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let base = builder.constant(F::from_canonical_usize(Self::BASE)); + for i in 0..self.num_input_limbs { + let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()) + .map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]) + .collect(); + let computed_sum = reduce_with_powers_ext_circuit(builder, &aux_limbs, base); + + constraints.push(builder.sub_extension(computed_sum, input_limb)); + for aux_limb in aux_limbs { + constraints.push({ + let mut acc = builder.one_extension(); + (0..Self::BASE).for_each(|i| { + // We update our accumulator as: + // acc' = acc (x - i) + // = acc x + (-i) acc + // Since -i is constant, we can do this in one arithmetic_extension call. + let neg_i = -F::from_canonical_usize(i); + acc = builder.arithmetic_extension(F::ONE, neg_i, acc, aux_limb, acc) + }); + acc + }); + } + } + + constraints + } + + fn generators(&self, row: usize, _local_constants: &[F]) -> Vec> { + let gen = U32RangeCheckGenerator { gate: *self, row }; + vec![WitnessGeneratorRef::new(gen.adapter())] + } + + fn num_wires(&self) -> usize { + self.num_input_limbs * (1 + self.aux_limbs_per_input_limb()) + } + + fn num_constants(&self) -> usize { + 0 + } + + // Bounded by the range-check (x-0)*(x-1)*...*(x-BASE+1). + fn degree(&self) -> usize { + Self::BASE + } + + // 1 for checking the each sum of aux limbs, plus a range check for each aux limb. 
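+ // With AUX_LIMB_BITS = 2 that is 1 + 16 = 17 constraints per 32-bit input limb.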
+ fn num_constraints(&self) -> usize { + self.num_input_limbs * (1 + self.aux_limbs_per_input_limb()) + } +} + +#[derive(Debug, Clone)] +pub struct U32RangeCheckGenerator, const D: usize> { + gate: U32RangeCheckGate, + row: usize, +} + +impl, const D: usize> SimpleGenerator + for U32RangeCheckGenerator +{ + fn id(&self) -> String { + "U32RangeCheckGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + let num_input_limbs = self.gate.num_input_limbs; + (0..num_input_limbs) + .map(|i| Target::wire(self.row, self.gate.wire_ith_input_limb(i))) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let num_input_limbs = self.gate.num_input_limbs; + for i in 0..num_input_limbs { + let sum_value = witness + .get_target(Target::wire(self.row, self.gate.wire_ith_input_limb(i))) + .to_canonical_u64() as u32; + + let base = U32RangeCheckGate::::BASE as u32; + let limbs = (0..self.gate.aux_limbs_per_input_limb()) + .map(|j| Target::wire(self.row, self.gate.wire_ith_input_limb_jth_aux_limb(i, j))); + let limbs_value = (0..self.gate.aux_limbs_per_input_limb()) + .scan(sum_value, |acc, _| { + let tmp = *acc % base; + *acc /= base; + Some(F::from_canonical_u32(tmp)) + }) + .collect::>(); + + for (b, b_value) in limbs.zip(limbs_value) { + out_buffer.set_target(b, b_value); + } + } + } + + fn serialize(&self, dst: &mut Vec, common_data: &CommonCircuitData) -> IoResult<()> { + self.gate.serialize(dst, common_data)?; + dst.write_usize(self.row) + } + + fn deserialize(src: &mut Buffer, common_data: &CommonCircuitData) -> IoResult { + let gate = U32RangeCheckGate::deserialize(src, common_data)?; + let row = src.read_usize()?; + Ok(Self { row, gate }) + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use itertools::unfold; + use plonky2::{ + field::{ + extension::quartic::QuarticExtension, + goldilocks_field::GoldilocksField, + types::{Field, Sample}, + }, + gates::gate_testing::{test_eval_fns, test_low_degree}, + hash::hash_types::HashOut, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + #[test] + fn low_degree() { + test_low_degree::(U32RangeCheckGate::new(8)) + } + + #[test] + fn eval_fns() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(U32RangeCheckGate::new(8)) + } + + fn test_gate_constraint(input_limbs: Vec) { + type F = GoldilocksField; + type FF = QuarticExtension; + const D: usize = 4; + const AUX_LIMB_BITS: usize = 2; + const BASE: usize = 1 << AUX_LIMB_BITS; + const AUX_LIMBS_PER_INPUT_LIMB: usize = ceil_div_usize(32, AUX_LIMB_BITS); + + fn get_wires(input_limbs: Vec) -> Vec { + let num_input_limbs = input_limbs.len(); + let mut v = Vec::new(); + + for i in 0..num_input_limbs { + let input_limb = input_limbs[i]; + + let split_to_limbs = |mut val, num| { + unfold((), move |_| { + let ret = val % (BASE as u64); + val /= BASE as u64; + Some(ret) + }) + .take(num) + .map(F::from_canonical_u64) + }; + + let mut aux_limbs: Vec<_> = + split_to_limbs(input_limb, AUX_LIMBS_PER_INPUT_LIMB).collect(); + + v.append(&mut aux_limbs); + } + + input_limbs + .iter() + .cloned() + .map(F::from_canonical_u64) + .chain(v.iter().cloned()) + .map(|x| x.into()) + .collect() + } + + let gate = U32RangeCheckGate:: { num_input_limbs: 8, _phantom: PhantomData }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(input_limbs), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + 
gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." + ); + } + + #[test] + fn test_gate_constraint_good() { + let mut rng = OsRng; + let input_limbs: Vec<_> = (0..8).map(|_| rng.gen::() as u64).collect(); + + test_gate_constraint(input_limbs); + } + + #[test] + #[should_panic] + fn test_gate_constraint_bad() { + let mut rng = OsRng; + let input_limbs: Vec<_> = (0..8).map(|_| rng.gen()).collect(); + + test_gate_constraint(input_limbs); + } +} diff --git a/u32/src/gates/subtraction_u32.rs b/u32/src/gates/subtraction_u32.rs new file mode 100644 index 0000000000..14fb166d1e --- /dev/null +++ b/u32/src/gates/subtraction_u32.rs @@ -0,0 +1,456 @@ +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::marker::PhantomData; +use plonky2::util::serialization::{Buffer, IoResult, Read, Write}; + +use plonky2::{ + field::{extension::Extendable, packed::PackedField, types::Field}, + gates::{gate::Gate, packed_util::PackedEvaluableBase, util::StridedConstraintConsumer}, + hash::hash_types::RichField, + iop::{ + ext_target::ExtensionTarget, + generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef}, + target::Target, + wire::Wire, + witness::{PartitionWitness, Witness, WitnessWrite}, + }, + plonk::{ + circuit_builder::CircuitBuilder, + circuit_data::{CircuitConfig, CommonCircuitData}, + vars::{ + EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch, + EvaluationVarsBasePacked, + }, + }, +}; + +/// A gate to perform a subtraction on 32-bit limbs: given `x`, `y`, and `borrow`, it returns +/// the result `x - y - borrow` and, if this underflows, a new `borrow`. Inputs are not range-checked. +#[derive(Copy, Clone, Debug)] +pub struct U32SubtractionGate, const D: usize> { + pub num_ops: usize, + _phantom: PhantomData, +} + +impl, const D: usize> U32SubtractionGate { + pub fn new_from_config(config: &CircuitConfig) -> Self { + Self { num_ops: Self::num_ops(config), _phantom: PhantomData } + } + + pub(crate) fn num_ops(config: &CircuitConfig) -> usize { + let wires_per_op = 5 + Self::num_limbs(); + let routed_wires_per_op = 5; + (config.num_wires / wires_per_op).min(config.num_routed_wires / routed_wires_per_op) + } + + pub fn wire_ith_input_x(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + 5 * i + } + pub fn wire_ith_input_y(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + 5 * i + 1 + } + pub fn wire_ith_input_borrow(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + 5 * i + 2 + } + + pub fn wire_ith_output_result(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + 5 * i + 3 + } + pub fn wire_ith_output_borrow(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + 5 * i + 4 + } + + pub fn limb_bits() -> usize { + 2 + } + // We have limbs for the 32 bits of `output_result`. 
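+ // With limb_bits() = 2 this gives 16 base-4 limbs.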
+ pub fn num_limbs() -> usize { + 32 / Self::limb_bits() + } + + pub fn wire_ith_output_jth_limb(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_ops); + debug_assert!(j < Self::num_limbs()); + 5 * self.num_ops + Self::num_limbs() * i + j + } +} + +impl, const D: usize> Gate for U32SubtractionGate { + fn id(&self) -> String { + format!("{self:?}") + } + + fn export_circom_verification_code(&self) -> String { + todo!(); + } + fn export_solidity_verification_code(&self) -> String { + todo!(); + } + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + dst.write_usize(self.num_ops) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult { + let num_ops = src.read_usize()?; + Ok(Self { num_ops, _phantom: PhantomData }) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + for i in 0..self.num_ops { + let input_x = vars.local_wires[self.wire_ith_input_x(i)]; + let input_y = vars.local_wires[self.wire_ith_input_y(i)]; + let input_borrow = vars.local_wires[self.wire_ith_input_borrow(i)]; + + let result_initial = input_x - input_y - input_borrow; + let base = F::Extension::from_canonical_u64(1 << 32u64); + + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_borrow = vars.local_wires[self.wire_ith_output_borrow(i)]; + + constraints.push(output_result - (result_initial + base * output_borrow)); + + // Range-check output_result to be at most 32 bits. + let mut combined_limbs = F::Extension::ZERO; + let limb_base = F::Extension::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = (0..max_limb) + .map(|x| this_limb - F::Extension::from_canonical_usize(x)) + .product(); + constraints.push(product); + + combined_limbs = limb_base * combined_limbs + this_limb; + } + constraints.push(combined_limbs - output_result); + + // Range-check output_borrow to be one bit. + constraints.push(output_borrow * (F::Extension::ONE - output_borrow)); + } + + constraints + } + + fn eval_unfiltered_base_one( + &self, + _vars: EvaluationVarsBase, + _yield_constr: StridedConstraintConsumer, + ) { + panic!("use eval_unfiltered_base_packed instead"); + } + + fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch) -> Vec { + self.eval_unfiltered_base_batch_packed(vars_base) + } + + fn eval_unfiltered_circuit( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + for i in 0..self.num_ops { + let input_x = vars.local_wires[self.wire_ith_input_x(i)]; + let input_y = vars.local_wires[self.wire_ith_input_y(i)]; + let input_borrow = vars.local_wires[self.wire_ith_input_borrow(i)]; + + let diff = builder.sub_extension(input_x, input_y); + let result_initial = builder.sub_extension(diff, input_borrow); + let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32u64)); + + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_borrow = vars.local_wires[self.wire_ith_output_borrow(i)]; + + let computed_output = builder.mul_add_extension(base, output_borrow, result_initial); + constraints.push(builder.sub_extension(output_result, computed_output)); + + // Range-check output_result to be at most 32 bits. 
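+ // Each limb is range-checked with a vanishing product and the limbs are recombined from the most significant one down (Horner's rule), so the recombination must equal output_result.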
+ let mut combined_limbs = builder.zero_extension(); + let limb_base = builder + .constant_extension(F::Extension::from_canonical_u64(1u64 << Self::limb_bits())); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let mut product = builder.one_extension(); + for x in 0..max_limb { + let x_target = + builder.constant_extension(F::Extension::from_canonical_usize(x)); + let diff = builder.sub_extension(this_limb, x_target); + product = builder.mul_extension(product, diff); + } + constraints.push(product); + + combined_limbs = builder.mul_add_extension(limb_base, combined_limbs, this_limb); + } + constraints.push(builder.sub_extension(combined_limbs, output_result)); + + // Range-check output_borrow to be one bit. + let one = builder.one_extension(); + let not_borrow = builder.sub_extension(one, output_borrow); + constraints.push(builder.mul_extension(output_borrow, not_borrow)); + } + + constraints + } + + fn generators(&self, row: usize, _local_constants: &[F]) -> Vec> { + (0..self.num_ops) + .map(|i| { + WitnessGeneratorRef::new( + U32SubtractionGenerator { gate: *self, row, i, _phantom: PhantomData } + .adapter(), + ) + }) + .collect() + } + + fn num_wires(&self) -> usize { + self.num_ops * (5 + Self::num_limbs()) + } + + fn num_constants(&self) -> usize { + 0 + } + + fn degree(&self) -> usize { + 1 << Self::limb_bits() + } + + fn num_constraints(&self) -> usize { + self.num_ops * (3 + Self::num_limbs()) + } +} + +impl, const D: usize> PackedEvaluableBase + for U32SubtractionGate +{ + fn eval_unfiltered_base_packed>( + &self, + vars: EvaluationVarsBasePacked

<P>, + mut yield_constr: StridedConstraintConsumer<P>
, + ) { + for i in 0..self.num_ops { + let input_x = vars.local_wires[self.wire_ith_input_x(i)]; + let input_y = vars.local_wires[self.wire_ith_input_y(i)]; + let input_borrow = vars.local_wires[self.wire_ith_input_borrow(i)]; + + let result_initial = input_x - input_y - input_borrow; + let base = F::from_canonical_u64(1 << 32u64); + + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_borrow = vars.local_wires[self.wire_ith_output_borrow(i)]; + + yield_constr.one(output_result - (result_initial + output_borrow * base)); + + // Range-check output_result to be at most 32 bits. + let mut combined_limbs = P::ZEROS; + let limb_base = F::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = + (0..max_limb).map(|x| this_limb - F::from_canonical_usize(x)).product(); + yield_constr.one(product); + + combined_limbs = combined_limbs * limb_base + this_limb; + } + yield_constr.one(combined_limbs - output_result); + + // Range-check output_borrow to be one bit. + yield_constr.one(output_borrow * (P::ONES - output_borrow)); + } + } +} + +#[derive(Clone, Debug)] +struct U32SubtractionGenerator, const D: usize> { + gate: U32SubtractionGate, + row: usize, + i: usize, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for U32SubtractionGenerator +{ + fn id(&self) -> String { + "U32SubtractionGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + let local_target = |column| Target::wire(self.row, column); + + vec![ + local_target(self.gate.wire_ith_input_x(self.i)), + local_target(self.gate.wire_ith_input_y(self.i)), + local_target(self.gate.wire_ith_input_borrow(self.i)), + ] + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let local_wire = |column| Wire { row: self.row, column }; + + let get_local_wire = |column| witness.get_wire(local_wire(column)); + + let input_x = get_local_wire(self.gate.wire_ith_input_x(self.i)); + let input_y = get_local_wire(self.gate.wire_ith_input_y(self.i)); + let input_borrow = get_local_wire(self.gate.wire_ith_input_borrow(self.i)); + + let result_initial = input_x - input_y - input_borrow; + let result_initial_u64 = result_initial.to_canonical_u64(); + let output_borrow = if result_initial_u64 > 1 << 32u64 { F::ONE } else { F::ZERO }; + + let base = F::from_canonical_u64(1 << 32u64); + let output_result = result_initial + base * output_borrow; + + let output_result_wire = local_wire(self.gate.wire_ith_output_result(self.i)); + let output_borrow_wire = local_wire(self.gate.wire_ith_output_borrow(self.i)); + + out_buffer.set_wire(output_result_wire, output_result); + out_buffer.set_wire(output_borrow_wire, output_borrow); + + let output_result_u64 = output_result.to_canonical_u64(); + + let num_limbs = U32SubtractionGate::::num_limbs(); + let limb_base = 1 << U32SubtractionGate::::limb_bits(); + let output_limbs: Vec<_> = (0..num_limbs) + .scan(output_result_u64, |acc, _| { + let tmp = *acc % limb_base; + *acc /= limb_base; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + + for j in 0..num_limbs { + let wire = local_wire(self.gate.wire_ith_output_jth_limb(self.i, j)); + out_buffer.set_wire(wire, output_limbs[j]); + } + } + + fn serialize(&self, dst: &mut Vec, common_data: &CommonCircuitData) -> IoResult<()> { + self.gate.serialize(dst, common_data)?; + dst.write_usize(self.row)?; + 
dst.write_usize(self.i) + } + + fn deserialize(src: &mut Buffer, common_data: &CommonCircuitData) -> IoResult { + let gate = U32SubtractionGate::deserialize(src, common_data)?; + let row = src.read_usize()?; + let i = src.read_usize()?; + Ok(Self { gate, row, i, _phantom: PhantomData }) + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use plonky2::{ + field::{ + extension::quartic::QuarticExtension, + goldilocks_field::GoldilocksField, + types::{PrimeField64, Sample}, + }, + gates::gate_testing::{test_eval_fns, test_low_degree}, + hash::hash_types::HashOut, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + }; + use rand::{rngs::OsRng, Rng}; + + use super::*; + + #[test] + fn low_degree() { + test_low_degree::(U32SubtractionGate:: { + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn eval_fns() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(U32SubtractionGate:: { + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn test_gate_constraint() { + type F = GoldilocksField; + type FF = QuarticExtension; + const D: usize = 4; + const NUM_U32_SUBTRACTION_OPS: usize = 3; + + fn get_wires(inputs_x: Vec, inputs_y: Vec, borrows: Vec) -> Vec { + let mut v0 = Vec::new(); + let mut v1 = Vec::new(); + + let limb_bits = U32SubtractionGate::::limb_bits(); + let num_limbs = U32SubtractionGate::::num_limbs(); + let limb_base = 1 << limb_bits; + for c in 0..NUM_U32_SUBTRACTION_OPS { + let input_x = F::from_canonical_u64(inputs_x[c]); + let input_y = F::from_canonical_u64(inputs_y[c]); + let input_borrow = F::from_canonical_u64(borrows[c]); + + let result_initial = input_x - input_y - input_borrow; + let result_initial_u64 = result_initial.to_canonical_u64(); + let output_borrow = if result_initial_u64 > 1 << 32u64 { F::ONE } else { F::ZERO }; + + let base = F::from_canonical_u64(1 << 32u64); + let output_result = result_initial + base * output_borrow; + + let output_result_u64 = output_result.to_canonical_u64(); + + let mut output_limbs: Vec<_> = (0..num_limbs) + .scan(output_result_u64, |acc, _| { + let tmp = *acc % limb_base; + *acc /= limb_base; + Some(F::from_canonical_u64(tmp)) + }) + .collect(); + + v0.push(input_x); + v0.push(input_y); + v0.push(input_borrow); + v0.push(output_result); + v0.push(output_borrow); + v1.append(&mut output_limbs); + } + + v0.iter().chain(v1.iter()).map(|&x| x.into()).collect() + } + + let mut rng = OsRng; + let inputs_x = (0..NUM_U32_SUBTRACTION_OPS).map(|_| rng.gen::() as u64).collect(); + let inputs_y = (0..NUM_U32_SUBTRACTION_OPS).map(|_| rng.gen::() as u64).collect(); + let borrows = (0..NUM_U32_SUBTRACTION_OPS).map(|_| (rng.gen::() % 2) as u64).collect(); + + let gate = + U32SubtractionGate:: { num_ops: NUM_U32_SUBTRACTION_OPS, _phantom: PhantomData }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(inputs_x, inputs_y, borrows), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." 
+ ); + } +} diff --git a/u32/src/lib.rs b/u32/src/lib.rs new file mode 100644 index 0000000000..c630269ecf --- /dev/null +++ b/u32/src/lib.rs @@ -0,0 +1,9 @@ +#![allow(clippy::needless_range_loop)] +#![no_std] + +extern crate alloc; + +pub mod gadgets; +pub mod gates; +pub mod serialization; +pub mod witness; diff --git a/u32/src/serialization.rs b/u32/src/serialization.rs new file mode 100644 index 0000000000..7beca40cdf --- /dev/null +++ b/u32/src/serialization.rs @@ -0,0 +1,26 @@ +use alloc::vec::Vec; +use plonky2::util::serialization::{Buffer, IoResult, Read, Write}; + +use crate::gadgets::arithmetic_u32::U32Target; + +pub trait WriteU32 { + fn write_target_u32(&mut self, x: U32Target) -> IoResult<()>; +} + +impl WriteU32 for Vec { + #[inline] + fn write_target_u32(&mut self, x: U32Target) -> IoResult<()> { + self.write_target(x.0) + } +} + +pub trait ReadU32 { + fn read_target_u32(&mut self) -> IoResult; +} + +impl ReadU32 for Buffer<'_> { + #[inline] + fn read_target_u32(&mut self) -> IoResult { + Ok(U32Target(self.read_target()?)) + } +} diff --git a/u32/src/witness.rs b/u32/src/witness.rs new file mode 100644 index 0000000000..26388ccf5a --- /dev/null +++ b/u32/src/witness.rs @@ -0,0 +1,37 @@ +use plonky2::{ + field::types::{Field, PrimeField64}, + iop::{ + generator::GeneratedValues, + witness::{Witness, WitnessWrite}, + }, +}; + +use crate::gadgets::arithmetic_u32::U32Target; + +pub trait WitnessU32: Witness { + fn set_u32_target(&mut self, target: U32Target, value: u32); + fn get_u32_target(&self, target: U32Target) -> (u32, u32); +} + +impl, F: PrimeField64> WitnessU32 for T { + fn set_u32_target(&mut self, target: U32Target, value: u32) { + self.set_target(target.0, F::from_canonical_u32(value)); + } + + fn get_u32_target(&self, target: U32Target) -> (u32, u32) { + let x_u64 = self.get_target(target.0).to_canonical_u64(); + let low = x_u64 as u32; + let high = (x_u64 >> 32) as u32; + (low, high) + } +} + +pub trait GeneratedValuesU32 { + fn set_u32_target(&mut self, target: U32Target, value: u32); +} + +impl GeneratedValuesU32 for GeneratedValues { + fn set_u32_target(&mut self, target: U32Target, value: u32) { + self.set_target(target.0, F::from_canonical_u32(value)) + } +}