diff --git a/crypto3.nix b/crypto3.nix
index 393a810489..eb0b2f74eb 100644
--- a/crypto3.nix
+++ b/crypto3.nix
@@ -33,6 +33,7 @@ in stdenv.mkDerivation {
     (if runTests then "-DCMAKE_ENABLE_TESTS=TRUE" else "")
     (if enableDebug then "-DCMAKE_BUILD_TYPE=Debug" else "-DCMAKE_BUILD_TYPE=Release")
     (if enableDebug then "-DCMAKE_CXX_FLAGS=-ggdb" else "")
+    (if enableDebug then "-DCMAKE_CXX_FLAGS=-O0" else "")
   ];

   doCheck = runTests; # tests are inside crypto3-tests derivation

diff --git a/libs/algebra/include/nil/crypto3/algebra/fields/detail/element/fp.hpp b/libs/algebra/include/nil/crypto3/algebra/fields/detail/element/fp.hpp
index 8a07499f2c..110f91da2f 100644
--- a/libs/algebra/include/nil/crypto3/algebra/fields/detail/element/fp.hpp
+++ b/libs/algebra/include/nil/crypto3/algebra/fields/detail/element/fp.hpp
@@ -246,7 +246,7 @@ namespace nil {
                     template::value>::type>
                     constexpr element_fp pow(const PowerType pwr) const {
-                        return element_fp(boost::multiprecision::powm(data, boost::multiprecision::uint128_t(pwr)));
+                        return element_fp(boost::multiprecision::powm(data, boost::multiprecision::uint128_modular_t(pwr)));
                     }

                     template

diff --git a/libs/mac/include/nil/crypto3/mac/detail/poly1305/poly1305_functions.hpp b/libs/mac/include/nil/crypto3/mac/detail/poly1305/poly1305_functions.hpp
index 813629e67d..705a4c10a3 100644
--- a/libs/mac/include/nil/crypto3/mac/detail/poly1305/poly1305_functions.hpp
+++ b/libs/mac/include/nil/crypto3/mac/detail/poly1305/poly1305_functions.hpp
@@ -99,9 +99,9 @@ namespace nil {
                     h2 += (((t1 >> 24)) & 0x3ffffffffff) | hibit;

                     /* h *= r */
-                    uint128_t d0 = uint128_t(h0) * r0 + uint128_t(h1) * s2 + uint128_t(h2) * s1;
-                    uint128_t d1 = uint128_t(h0) * r1 + uint128_t(h1) * r0 + uint128_t(h2) * s2;
-                    uint128_t d2 = uint128_t(h0) * r2 + uint128_t(h1) * r1 + uint128_t(h2) * r0;
+                    uint128_modular_t d0 = uint128_modular_t(h0) * r0 + uint128_modular_t(h1) * s2 + uint128_modular_t(h2) * s1;
+                    uint128_modular_t d1 = uint128_modular_t(h0) * r1 + uint128_modular_t(h1) * r0 + uint128_modular_t(h2) * s2;
+                    uint128_modular_t d2 = uint128_modular_t(h0) * r2 + uint128_modular_t(h1) * r1 + uint128_modular_t(h2) * r0;

                     /* (partial) h %= p */
                     word_type c = carry_shift(d0, 44);

diff --git a/libs/multiprecision/example/mixed_integer_arithmetic.cpp b/libs/multiprecision/example/mixed_integer_arithmetic.cpp
index 66c7a74e0c..e9c5982634 100644
--- a/libs/multiprecision/example/mixed_integer_arithmetic.cpp
+++ b/libs/multiprecision/example/mixed_integer_arithmetic.cpp
@@ -21,7 +21,7 @@ int main()
    boost::uint64_t i = (std::numeric_limits::max)();
    boost::uint64_t j = 1;

-   uint128_t ui128;
+   uint128_modular_t ui128;
    uint256_t ui256;
    //
    // Start by performing arithmetic on 64-bit integers to yield 128-bit results:
@@ -32,7 +32,7 @@ int main()
    //
    // The try squaring a 128-bit integer to yield a 256-bit result:
    //
-   ui128 = (std::numeric_limits::max)();
+   ui128 = (std::numeric_limits::max)();
    std::cout << std::hex << std::showbase << multiply(ui256, ui128, ui128) << std::endl;

    return 0;

diff --git a/libs/multiprecision/include/nil/crypto3/multiprecision/cpp_int_modular/bitwise.hpp b/libs/multiprecision/include/nil/crypto3/multiprecision/cpp_int_modular/bitwise.hpp
index 258630d2f1..7c466a9415 100644
--- a/libs/multiprecision/include/nil/crypto3/multiprecision/cpp_int_modular/bitwise.hpp
+++ b/libs/multiprecision/include/nil/crypto3/multiprecision/cpp_int_modular/bitwise.hpp
@@ -99,10 +99,7 @@ namespace boost {
             template
             BOOST_MP_FORCEINLINE BOOST_MP_CXX14_CONSTEXPR typename std::enable_if<
-                boost::multiprecision::is_unsigned_number>::value &&
-                    !boost::multiprecision::backends::is_trivial_cpp_int_modular>::value &&
-                    !boost::multiprecision::backends::is_trivial_cpp_int_modular>::value>::
-                    type
+                !boost::multiprecision::backends::is_trivial_cpp_int_modular>::value>::type
                 eval_complement(cpp_int_modular_backend& result,
                                 const cpp_int_modular_backend& o) noexcept {
                 unsigned os = o.size();
@@ -110,7 +107,7 @@ namespace boost {
                     result.limbs()[i] = ~o.limbs()[i];
                 result.normalize();
             }
-#ifndef TVM
+
             // Left shift will throw away upper bits.
             // This function must be called only when s % 8 == 0, i.e. we shift bytes.
             template
@@ -129,7 +126,6 @@ namespace boost {
                     std::memset(pc, 0, bytes);
                 }
             }
-#endif

             // Left shift will throw away upper bits.
             // This function must be called only when s % limb_bits == 0, i.e. we shift limbs, which are normally 64 bit.

diff --git a/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_functions_fixed.hpp b/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_functions_fixed.hpp
index 01cede1df4..df37ebe852 100644
--- a/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_functions_fixed.hpp
+++ b/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_functions_fixed.hpp
@@ -245,6 +245,8 @@ namespace boost {
                     initialize_modulus(m);
                     initialize_barrett_params();
                     initialize_montgomery_params();
+
+                    m_no_carry_montgomery_mul_allowed = is_applicable_for_no_carry_montgomery_mul();
                 }

             public:
@@ -417,79 +419,190 @@ namespace boost {
                     barrett_reduce(result, tmp);
                 }

-                template
-                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul(Backend1 &result, const Backend1 &y) const {
-                    return montgomery_mul_impl(result, y, std::integral_constant::value>());
-                }
-
-                //
-                // WARNING: could be errors here due to trivial backend -- more tests needed
-                // TODO(martun): optimize this function, it obviously does not need to be this long.
-                //
-                // A specialization for trivial cpp_int_modular types only.
-                template
-                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul_impl(Backend1 &result, const Backend1 &y, std::integral_constant const&) const {
-                    BOOST_ASSERT(eval_lt(result, m_mod) && eval_lt(y, m_mod));
-
-                    Backend_padded_limbs A(internal_limb_type(0u));
-                    const size_t mod_size = m_mod.size();
-                    auto mod_last_limb = static_cast(get_limb_value(m_mod, 0));
-                    auto y_last_limb = get_limb_value(y, 0);
+                // Delegates Montgomery multiplication to one of the corresponding algorithms.
+                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul(
+                    Backend &result, const Backend &y,
+                    std::integral_constant const&) const {
-                    for (size_t i = 0; i < mod_size; i++) {
-                        auto x_i = get_limb_value(result, i);
-                        auto A_0 = A.limbs()[0];
-                        internal_limb_type u_i = (A_0 + x_i * y_last_limb) * m_montgomery_p_dash;
+                    if ( m_no_carry_montgomery_mul_allowed )
+                        montgomery_mul_no_carry_impl(
+                            result,
+                            y);
+                    else
+                        montgomery_mul_CIOS_impl(
+                            result,
+                            y,
+                            std::integral_constant() );
+                }
+
+                void montgomery_mul(Backend &result, const Backend &y, std::integral_constant const&) const {
+                    montgomery_mul_CIOS_impl(
+                        result,
+                        y,
+                        std::integral_constant() );
+                }
+
+                // Given a value represented in 'double_limb_type', decomposes it into
+                // two 'limb_type' variables: 'a' receives the high-order bits of 'X',
+                // and 'b' receives the low-order bits.
+                static BOOST_MP_CXX14_CONSTEXPR void dbl_limb_to_limbs(
+                    const internal_double_limb_type& X,
+                    internal_limb_type& a,
+                    internal_limb_type& b ) {
+                    b = X;
+                    a = X >> limb_bits;
+                }
+
+                // Tests if the faster implementation of Montgomery multiplication is possible.
+                // We don't need the template argument Backend1, it's just here to enable specialization.
+                template
+                BOOST_MP_CXX14_CONSTEXPR typename boost::enable_if_c::value, bool>::type
+                is_applicable_for_no_carry_montgomery_mul() const {
+
+                    // Check that
+                    // 1. The modulus does not use the most significant bit of the number, so there is at least
+                    //    1 spare bit; i.e. if the modulus is 255 bits, the number has 1 additional "unused" top bit.
+                    // 2. Some other bit in the modulus is 0.
+                    // 3. The number has < 12 limbs.
+                    return m_mod.internal_limb_count < 12 && (Bits % sizeof(internal_limb_type) != 0) &&
+                        !eval_eq(m_mod_compliment, Backend(internal_limb_type(1u)));
+                }
+
+                template
+                BOOST_MP_CXX14_CONSTEXPR typename boost::enable_if_c::value, bool>::type
+                is_applicable_for_no_carry_montgomery_mul() const {
+                    return false;
+                }
+
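The two helpers added above do two small things: `dbl_limb_to_limbs` splits a double-width product into its high and low limbs, and `is_applicable_for_no_carry_montgomery_mul` gates the fast path on the modulus leaving at least one unused top bit. The following is a standalone illustration with built-in types, not part of the patch: `unsigned __int128` stands in for `internal_double_limb_type`, the helper names are made up for the example, and the applicability test is reduced to its essential "spare top bit" condition (the patch additionally rejects the all-ones modulus and backends with 12 or more limbs).

```cpp
#include <cassert>
#include <cstdint>
#include <cstddef>

using limb_t = std::uint64_t;
using dbl_limb_t = unsigned __int128;   // stand-in for internal_double_limb_type

// Split a double-width value into its high and low limbs,
// mirroring what the added dbl_limb_to_limbs helper does.
void dbl_limb_to_limbs(dbl_limb_t x, limb_t& hi, limb_t& lo) {
    lo = static_cast<limb_t>(x);        // low 64 bits
    hi = static_cast<limb_t>(x >> 64);  // high 64 bits
}

// Essential precondition of the no-carry variant: the top bit of the most
// significant limb of the modulus must be unused, so the C + A sum in the
// main loop cannot overflow a single limb.
bool has_spare_top_bit(const limb_t* mod_limbs, std::size_t limb_count) {
    return (mod_limbs[limb_count - 1] >> 63) == 0;
}

int main() {
    dbl_limb_t product = dbl_limb_t(0x1234567890abcdefULL) * 0xfedcba0987654321ULL;
    limb_t hi = 0, lo = 0;
    dbl_limb_to_limbs(product, hi, lo);
    assert(((dbl_limb_t(hi) << 64) | lo) == product);

    const limb_t mod[4] = {~0ULL, ~0ULL, ~0ULL, 0x7fffffffffffffffULL};  // a 255-bit modulus
    assert(has_spare_top_bit(mod, 4));
    return 0;
}
```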
+                // Non-carry implementation of Montgomery multiplication.
+                // Implemented from pseudo-code at
+                //   "https://hackmd.io/@gnark/modular_multiplication".
+                template< typename Backend1 >
+                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul_no_carry_impl(
+                    Backend1& c,
+                    const Backend1& b) const {
+                    BOOST_ASSERT( eval_lt(c, m_mod) && eval_lt(b, m_mod) );
+                    BOOST_ASSERT( is_applicable_for_no_carry_montgomery_mul() );
+
+                    // Obtain number of limbs
+                    constexpr int N = Backend1::internal_limb_count;
+
+                    const Backend1 a( c ); // Copy the first argument, as the implemented
+                                           // algorithm doesn't work in-place.
+
+                    // We cannot write directly to 'c', because 'b' may be equal to 'c', and by
+                    // changing the value of 'c' we would change 'b' as well.
+                    Backend1 result = internal_limb_type(0u);
+
+                    // Prepare temporary variables
+                    internal_limb_type A( 0u ), C( 0u );
+                    internal_double_limb_type tmp( 0u );
+                    internal_limb_type dummy ( 0u );
+
+                    auto* a_limbs = a.limbs();
+                    auto* b_limbs = b.limbs();
+                    auto* result_limbs = result.limbs();
+                    auto* m_mod_limbs = m_mod.limbs();
+
+                    for ( int i = 0; i < N; ++i ) {
+                        // "(A,t[0]) := t[0] + a[0]*b[i]"
+                        tmp = a_limbs[0];
+                        tmp *= b_limbs[i];
+                        tmp += result_limbs[0];
+                        modular_functions_fixed::dbl_limb_to_limbs( tmp, A, result_limbs[0] );
+
+                        // "m := t[0]*q'[0] mod W"
+                        tmp = result_limbs[0];
+                        //tmp *= q.limbs()[0];
+                        tmp *= m_montgomery_p_dash;
+                        // tmp = -tmp;
+                        // Note that m is a shorter integer, and we are taking the last bits of tmp.
+                        internal_limb_type m = tmp;
+
+                        // "(C,_) := t[0] + m*q[0]"
+                        tmp = m;
+                        tmp *= m_mod_limbs[0];
+                        tmp += result_limbs[0];
+                        modular_functions_fixed::dbl_limb_to_limbs( tmp, C, dummy );
+
+                        // The lower loop is unrolled. We want to do this for every 3, because normally mod_size == 4.
+                        std::size_t j = 1;
+                        for (; j + 3 <= N; j += 3) {
+                            // For j
+                            // "(A,t[j]) := t[j] + a[j]*b[i] + A"
+                            tmp = a_limbs[j];
+                            tmp *= b_limbs[i];
+                            tmp += result_limbs[j];
+                            tmp += A;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, A, result_limbs[j] );
+
+                            // "(C,t[j-1]) := t[j] + m*q[j] + C"
+                            tmp = m;
+                            tmp *= m_mod_limbs[j];
+                            tmp += result_limbs[j];
+                            tmp += C;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, C, result_limbs[j-1] );
-                        // A += x[i] * y + u_i * m followed by a 1 limb-shift to the right
-                        internal_limb_type k = 0;
-                        internal_limb_type k2 = 0;
+                            // For j + 1
+                            // "(A,t[j]) := t[j] + a[j]*b[i] + A"
+                            tmp = a_limbs[j + 1];
+                            tmp *= b_limbs[i];
+                            tmp += result_limbs[j + 1];
+                            tmp += A;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, A, result_limbs[j + 1] );
+
+                            // "(C,t[j-1]) := t[j] + m*q[j] + C"
+                            tmp = m;
+                            tmp *= m_mod_limbs[j + 1];
+                            tmp += result_limbs[j + 1];
+                            tmp += C;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, C, result_limbs[j] );
-                        internal_double_limb_type z = static_cast(y_last_limb) *
-                                                          static_cast(x_i) +
-                                                      A_0 + k;
-                        internal_double_limb_type z2 = mod_last_limb * static_cast(u_i) +
-                                                       static_cast(z) + k2;
-                        k = static_cast(z >> std::numeric_limits::digits);
-                        k2 = static_cast(z2 >> std::numeric_limits::digits);
+                            // For j + 2
+                            // "(A,t[j]) := t[j] + a[j]*b[i] + A"
+                            tmp = a_limbs[j + 2];
+                            tmp *= b_limbs[i];
+                            tmp += result_limbs[j + 2];
+                            tmp += A;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, A, result_limbs[j + 2] );
+
+                            // "(C,t[j-1]) := t[j] + m*q[j] + C"
+                            tmp = m;
+                            tmp *= m_mod_limbs[j + 2];
+                            tmp += result_limbs[j + 2];
+                            tmp += C;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, C, result_limbs[j + 1] );
+                        }
-                        for (size_t j = 1; j < mod_size; ++j) {
-                            internal_double_limb_type t =
-                                static_cast(get_limb_value(y, j)) *
-                                    static_cast(x_i) +
-                                A.limbs()[j] + k;
-                            internal_double_limb_type t2 =
-                                static_cast(get_limb_value(m_mod, j)) *
-                                    static_cast(u_i) +
-                                static_cast(t) + k2;
-                            A.limbs()[j - 1] = static_cast(t2);
-                            k = static_cast(t >>
-                                std::numeric_limits::digits);
-                            k2 = static_cast(t2 >>
-                                std::numeric_limits::digits);
+                        for ( ; j < N; ++j ) {
+                            // "(A,t[j]) := t[j] + a[j]*b[i] + A"
+                            tmp = a_limbs[j];
+                            tmp *= b_limbs[i];
+                            tmp += result_limbs[j];
+                            tmp += A;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, A, result_limbs[j] );
+
+                            // "(C,t[j-1]) := t[j] + m*q[j] + C"
+                            tmp = m;
+                            tmp *= m_mod_limbs[j];
+                            tmp += result_limbs[j];
+                            tmp += C;
+                            modular_functions_fixed::dbl_limb_to_limbs( tmp, C, result_limbs[j-1] );
                         }
-                        internal_double_limb_type tmp =
-                            static_cast(
-                                custom_get_limb_value(A, mod_size)) +
-                            k + k2;
-                        custom_set_limb_value(A, mod_size - 1,
-                                              static_cast(tmp));
-                        custom_set_limb_value(
-                            A, mod_size,
-                            static_cast(tmp >>
-                                std::numeric_limits::digits));
-                    }
-                    if (!eval_lt(A, m_mod)) {
-                        eval_subtract(A, m_mod);
+                        // "t[N-1] = C + A"
+                        result_limbs[N-1] = C + A;
                     }
-                    result = A;
+                    if (!eval_lt(result, m_mod)) {
+                        eval_subtract(result, m_mod);
+                    }
+                    c = result;
                 }
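For comparison with the pseudo-code at https://hackmd.io/@gnark/modular_multiplication, the same outer loop without the three-way unrolling can be written with plain 64-bit limbs as below. This is an illustrative sketch, not the patch's code: the function name is made up, `q_dash` plays the role of `m_montgomery_p_dash` (i.e. -q^{-1} mod 2^64), `unsigned __int128` replaces `internal_double_limb_type`, and the trailing conditional subtraction corresponds to the `eval_lt`/`eval_subtract` step above.

```cpp
#include <cstdint>
#include <cstddef>

using limb_t = std::uint64_t;
using dlimb_t = unsigned __int128;   // double-width accumulator

// Plain (non-unrolled) form of the no-carry Montgomery multiplication loop:
// computes t = a * b * R^{-1} mod q for R = 2^(64*N), with q odd and
// q_dash = -q^{-1} mod 2^64.  Requires the top bit of q's most significant
// limb to be clear, so that C + A below always fits in a single limb.
template<std::size_t N>
void montgomery_mul_no_carry(limb_t (&t)[N], const limb_t (&a)[N],
                             const limb_t (&b)[N], const limb_t (&q)[N],
                             limb_t q_dash) {
    for (std::size_t k = 0; k < N; ++k) t[k] = 0;

    for (std::size_t i = 0; i < N; ++i) {
        // (A, t[0]) := t[0] + a[0]*b[i]
        dlimb_t tmp = dlimb_t(a[0]) * b[i] + t[0];
        limb_t A = limb_t(tmp >> 64);
        t[0] = limb_t(tmp);

        // m := t[0] * q'[0] mod W  (wraps modulo 2^64)
        limb_t m = t[0] * q_dash;

        // (C, _) := t[0] + m*q[0]; the low limb is discarded
        dlimb_t ctmp = dlimb_t(m) * q[0] + t[0];
        limb_t C = limb_t(ctmp >> 64);

        for (std::size_t j = 1; j < N; ++j) {
            // (A, t[j])   := t[j] + a[j]*b[i] + A
            tmp = dlimb_t(a[j]) * b[i] + t[j] + A;
            A = limb_t(tmp >> 64);
            t[j] = limb_t(tmp);

            // (C, t[j-1]) := t[j] + m*q[j] + C   (uses the just-updated t[j])
            ctmp = dlimb_t(m) * q[j] + t[j] + C;
            C = limb_t(ctmp >> 64);
            t[j - 1] = limb_t(ctmp);
        }
        // t[N-1] := C + A; cannot overflow under the spare-top-bit precondition
        t[N - 1] = C + A;
    }

    // Single conditional subtraction, the analogue of eval_lt/eval_subtract above.
    bool ge = true;
    for (std::size_t k = N; k-- > 0;) {
        if (t[k] != q[k]) { ge = t[k] > q[k]; break; }
    }
    if (ge) {
        limb_t borrow = 0;
        for (std::size_t k = 0; k < N; ++k) {
            dlimb_t diff = dlimb_t(t[k]) - q[k] - borrow;
            t[k] = limb_t(diff);
            borrow = limb_t(diff >> 64) & 1u;   // 1 if the subtraction wrapped
        }
    }
}
```

The unrolled j-loop in `montgomery_mul_no_carry_impl` performs exactly these two recurrences per index, three indices at a time, which helps the compiler keep A, C and m in registers for the common mod_size == 4 case.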
                 // A specialization for non-trivial cpp_int_modular types only.
                 template
-                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul_impl(Backend1 &result, const Backend1 &y,
+                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul_CIOS_impl(Backend1 &result, const Backend1 &y,
                     std::integral_constant const&) const {
                     BOOST_ASSERT(eval_lt(result, m_mod) && eval_lt(y, m_mod));
@@ -514,8 +627,7 @@ namespace boost {
                     internal_double_limb_type z = 0;
                     internal_double_limb_type z2 = 0;

-                    std::size_t i = 0;
-                    while (i < mod_size) {
+                    for (std::size_t i = 0; i < mod_size; ++i) {
                         x_i = x_limbs[i];
                         A_0 = A_limbs[0];
                         u_i = (A_0 + x_i * y_last_limb) * m_montgomery_p_dash;
@@ -534,7 +646,7 @@ namespace boost {

                         std::size_t j = 1;

-                        // We want to do this for every 3, because normally mod_size == 4.
+                        // The lower loop is unrolled. We want to do this for every 3, because normally mod_size == 4.
                         internal_double_limb_type t = 0, t2 = 0;
                         for (; j + 3 <= mod_size; j += 3) {
                             // For j
@@ -589,7 +701,6 @@ namespace boost {
                         A_limbs[mod_size - 1] = static_cast(tmp);
                         carry = static_cast(
                             tmp >> std::numeric_limits::digits);
-                        ++i;
                     }

                     if (carry) {
@@ -602,6 +713,75 @@ namespace boost {
                     result = A;
                 }

+                //
+                // WARNING: could be errors here due to trivial backend -- more tests needed
+                // TODO(martun): optimize this function, it obviously does not need to be this long.
+                //
+                // A specialization for trivial cpp_int_modular types only.
+                template< typename Backend1 >
+                BOOST_MP_CXX14_CONSTEXPR void montgomery_mul_CIOS_impl(
+                    Backend1& result,
+                    const Backend1& y,
+                    std::integral_constant const& ) const {
+
+                    BOOST_ASSERT(eval_lt(result, m_mod) && eval_lt(y, m_mod));
+
+                    Backend_padded_limbs A(internal_limb_type(0u));
+                    const size_t mod_size = m_mod.size();
+                    auto mod_last_limb = static_cast(get_limb_value(m_mod, 0));
+                    auto y_last_limb = get_limb_value(y, 0);
+
+                    for (size_t i = 0; i < mod_size; i++) {
+                        auto x_i = get_limb_value(result, i);
+                        auto A_0 = A.limbs()[0];
+                        internal_limb_type u_i = (A_0 + x_i * y_last_limb) * m_montgomery_p_dash;
+
+                        // A += x[i] * y + u_i * m followed by a 1 limb-shift to the right
+                        internal_limb_type k = 0;
+                        internal_limb_type k2 = 0;
+
+                        internal_double_limb_type z = static_cast(y_last_limb) *
+                                static_cast(x_i) +
+                            A_0 + k;
+                        internal_double_limb_type z2 = mod_last_limb * static_cast(u_i) +
+                            static_cast(z) + k2;
+                        k = static_cast(z >> std::numeric_limits::digits);
+                        k2 = static_cast(z2 >> std::numeric_limits::digits);
+
+                        for (size_t j = 1; j < mod_size; ++j) {
+                            internal_double_limb_type t =
+                                static_cast(get_limb_value(y, j)) *
+                                    static_cast(x_i) +
+                                A.limbs()[j] + k;
+                            internal_double_limb_type t2 =
+                                static_cast(get_limb_value(m_mod, j)) *
+                                    static_cast(u_i) +
+                                static_cast(t) + k2;
+                            A.limbs()[j - 1] = static_cast(t2);
+                            k = static_cast(t >>
+                                std::numeric_limits::digits);
+                            k2 = static_cast(t2 >>
+                                std::numeric_limits::digits);
+                        }
+                        internal_double_limb_type tmp =
+                            static_cast(
+                                custom_get_limb_value(A, mod_size)) +
+                            k + k2;
+                        custom_set_limb_value(A, mod_size - 1,
+                            static_cast(tmp));
+                        custom_set_limb_value(
+                            A, mod_size,
+                            static_cast(tmp >>
+                                std::numeric_limits::digits));
+                    }
+
+                    if (!eval_lt(A, m_mod)) {
+                        eval_subtract(A, m_mod);
+                    }
+
+                    result = A;
+                }
+
                 template::value >=
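The trivial-backend specialization above still runs the same word loop, but for intuition it can help to see what CIOS collapses to when the odd modulus fits a single 64-bit limb: the classic one-word Montgomery reduction. The sketch below is illustrative only and not the code path the patch takes; the function name is invented, 64-bit limbs and `unsigned __int128` double-width products are assumed, and it spells out the 129-bit overflow pitfall that a naive `(t + m*q) >> 64` would hit for moduli above 2^63.

```cpp
#include <cstdint>

using limb_t = std::uint64_t;
using dlimb_t = unsigned __int128;

// Classic one-word Montgomery multiplication: returns a*b*R^{-1} mod q for
// R = 2^64, q odd, q_dash = -q^{-1} mod 2^64, with a, b < q.
limb_t montgomery_mul_1(limb_t a, limb_t b, limb_t q, limb_t q_dash) {
    dlimb_t t  = dlimb_t(a) * b;          // full 128-bit product, t < q*R
    limb_t  m  = limb_t(t) * q_dash;      // m = (t mod R) * (-q^{-1}) mod R
    dlimb_t mq = dlimb_t(m) * q;

    // t + m*q is divisible by R, but the sum itself can need 129 bits when
    // q > 2^63, so add the halves separately instead of (t + mq) >> 64.
    limb_t  carry = (limb_t(t) != 0);     // low halves sum to 0 or exactly 2^64
    dlimb_t u = (t >> 64) + (mq >> 64) + carry;   // (t + m*q) / R, and u < 2*q

    limb_t r = limb_t(u);
    if (u >= q) r -= q;                   // at most one subtraction is needed
    return r;
}
```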
@@ -670,12 +850,12 @@ namespace boost {
                         internal_limb_type lsb = exp.limbs()[0] & 1u;
                         custom_right_shift(exp, static_cast(1u));
                         if (lsb) {
-                            montgomery_mul(R_mod_m, base);
+                            montgomery_mul(R_mod_m, base, std::integral_constant::value>());
                             if (eval_eq(exp, static_cast(0u))) {
                                 break;
                             }
                         }
-                        montgomery_mul(base, base);
+                        montgomery_mul(base, base, std::integral_constant::value>());
                     }
                     result = R_mod_m;
                 }
@@ -686,6 +866,7 @@ namespace boost {
                     m_montgomery_r2 = o.get_r2();
                     m_montgomery_p_dash = o.get_p_dash();
                     m_mod_compliment = o.get_mod_compliment();
+                    m_no_carry_montgomery_mul_allowed = is_applicable_for_no_carry_montgomery_mul();

                     return *this;
                 }
@@ -703,9 +884,14 @@ namespace boost {
                 Backend_doubled_1 m_barrett_mu;
                 Backend m_montgomery_r2;
                 internal_limb_type m_montgomery_p_dash = 0;
+
+                // If set, the no-carry optimization is allowed. Must be set via
+                // is_applicable_for_no_carry_montgomery_mul() once the other Montgomery parameters are initialized.
+                bool m_no_carry_montgomery_mul_allowed = false;
             };
         }    // namespace backends
     }    // namespace multiprecision
 }    // namespace boost

 #endif    // CRYPTO3_MULTIPRECISION_MODULAR_FUNCTIONS_FIXED_PRECISION_HPP
+

diff --git a/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_params_fixed.hpp b/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_params_fixed.hpp
index eca3e708c6..57746a9832 100644
--- a/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_params_fixed.hpp
+++ b/libs/multiprecision/include/nil/crypto3/multiprecision/modular/modular_params_fixed.hpp
@@ -122,10 +122,11 @@ namespace boost {
                 }
             }

-            template
-            BOOST_MP_CXX14_CONSTEXPR void mod_mul(Backend1 &result, const Backend2 &y) const {
+            template
+            BOOST_MP_CXX14_CONSTEXPR void mod_mul(Backend1 &result, const Backend1 &y) const {
                 if (is_odd_mod) {
-                    m_mod_obj.montgomery_mul(result, y);
+                    m_mod_obj.montgomery_mul(result, y,
+                        std::integral_constant::value>());
                 } else {
                     m_mod_obj.regular_mul(result, y);
                 }

diff --git a/libs/multiprecision/test/bench_test/bench_test_modular_adaptor_fixed.cpp b/libs/multiprecision/test/bench_test/bench_test_modular_adaptor_fixed.cpp
index 783d353e87..02ba419759 100644
--- a/libs/multiprecision/test/bench_test/bench_test_modular_adaptor_fixed.cpp
+++ b/libs/multiprecision/test/bench_test/bench_test_modular_adaptor_fixed.cpp
@@ -64,13 +64,14 @@ BOOST_AUTO_TEST_CASE(modular_adaptor_montgomery_mult_perf_test) {
     auto mod_object = x_modular.mod_data().get_mod_obj();
     auto base_data = x_modular.base_data();
     for (int i = 0; i < SAMPLES; ++i) {
-        mod_object.montgomery_mul(base_data, res_modular.base_data());
+        mod_object.montgomery_mul(base_data, res_modular.base_data(),
+            std::integral_constant::value>());
     }
     std::cout << base_data << std::endl;

     auto elapsed = std::chrono::duration_cast(
         std::chrono::high_resolution_clock::now() - start);
-    std::cout << "Multiplication time: " << std::fixed << std::setprecision(3)
+    std::cout << "Multiplication time (when montgomery_mul is called directly): " << std::fixed << std::setprecision(3)
               << std::dec << elapsed.count() / SAMPLES << " ns" << std::endl;
 }
@@ -162,7 +163,7 @@ BOOST_AUTO_TEST_CASE(modular_adaptor_backend_mult_perf_test) {

     auto elapsed = std::chrono::duration_cast(
         std::chrono::high_resolution_clock::now() - start);
-    std::cout << "Multiplication time: " << std::fixed << std::setprecision(3)
+    std::cout << "Multiplication time (when called from modular adaptor): " << std::fixed << std::setprecision(3)
               << elapsed.count() / SAMPLES << " ns" << std::endl;

     // Print something so the whole computation is not optimized out.
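The `mod_mul` change in modular_params_fixed.hpp keeps the existing odd/even split: Montgomery multiplication is only dispatched to when the modulus is odd, because the per-word constant `m_montgomery_p_dash` (-m^{-1} mod 2^64 for 64-bit limbs) only exists for odd m. Below is a standalone sketch of how such a constant can be computed by Hensel/Newton lifting; it is illustrative only, not the library's initialization code, and the function name and sample modulus are made up for the example.

```cpp
#include <cassert>
#include <cstdint>

// Montgomery arithmetic needs p_dash = -m^{-1} mod 2^64, which exists only
// for odd m; this is why mod_mul falls back to regular_mul for even moduli.
// Each Newton step doubles the number of correct low-order bits.
std::uint64_t montgomery_p_dash(std::uint64_t m) {
    assert(m % 2 == 1);
    std::uint64_t inv = m;               // correct to 3 bits for any odd m
    for (int i = 0; i < 5; ++i)          // 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits
        inv *= 2 - m * inv;              // Newton step, wrapping modulo 2^64
    return ~inv + 1;                     // -m^{-1} mod 2^64
}

int main() {
    std::uint64_t m = 0xffffffff00000001ULL;   // an odd 64-bit modulus
    std::uint64_t pd = montgomery_p_dash(m);
    assert(m * (~pd + 1) == 1);                // m * m^{-1} == 1 (mod 2^64)
    return 0;
}
```

Five iterations suffice because the starting guess `inv = m` is already correct modulo 8 for any odd m, and each step doubles that precision past 64 bits.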
diff --git a/libs/multiprecision/test/constexpr_test_cpp_int_2.cpp b/libs/multiprecision/test/constexpr_test_cpp_int_2.cpp
index 1aa63510ca..d046803ee7 100644
--- a/libs/multiprecision/test/constexpr_test_cpp_int_2.cpp
+++ b/libs/multiprecision/test/constexpr_test_cpp_int_2.cpp
@@ -18,7 +18,7 @@ decltype(std::declval()(std::declval())) non_constexpr_invoke(F f, V v) {

 int main() {
    typedef boost::multiprecision::int128_t int_backend;
-   typedef boost::multiprecision::uint128_t unsigned_backend;
+   typedef boost::multiprecision::uint128_modular_t unsigned_backend;

    {
       constexpr int_backend a(22);

diff --git a/libs/multiprecision/test/constexpr_test_cpp_int_4.cpp b/libs/multiprecision/test/constexpr_test_cpp_int_4.cpp
index be469a8646..f19fe35253 100644
--- a/libs/multiprecision/test/constexpr_test_cpp_int_4.cpp
+++ b/libs/multiprecision/test/constexpr_test_cpp_int_4.cpp
@@ -14,7 +14,7 @@ decltype(std::declval()(std::declval())) non_constexpr_invoke(F f, V v) {

 int main() {
    typedef boost::multiprecision::checked_int128_t int_backend;
-   typedef boost::multiprecision::checked_uint128_t unsigned_backend;
+   typedef boost::multiprecision::checked_uint128_modular_t unsigned_backend;

    {
       constexpr int_backend a(22);

diff --git a/libs/multiprecision/test/modular_adaptor_fixed.cpp b/libs/multiprecision/test/modular_adaptor_fixed.cpp
index a86248cd3a..598e3f1a66 100644
--- a/libs/multiprecision/test/modular_adaptor_fixed.cpp
+++ b/libs/multiprecision/test/modular_adaptor_fixed.cpp
@@ -118,10 +118,14 @@ bool base_operations_test(std::array test_set) {
     standard_number a_powm_b_s = powm(a_cppint, b_cppint, e_cppint);
     standard_number a_bit_set_s = a_cppint;
     bit_set(a_bit_set_s, 1);
+    a_bit_set_s %= e_cppint;
     standard_number a_bit_unset_s = a_cppint;
+    a_bit_unset_s %= e_cppint;
     bit_unset(a_bit_unset_s, 2);
     standard_number a_bit_flip_s = a_cppint;
     bit_flip(a_bit_flip_s, 3);
+    a_bit_flip_s %= e_cppint;
+
     int b_msb_s = msb(b_cppint);
     int b_lsb_s = lsb(b_cppint);
@@ -142,6 +146,7 @@ bool base_operations_test(std::array test_set) {
     bit_unset(a_bit_unset, 2);
     modular_number a_bit_flip = a;
     bit_flip(a_bit_flip, 3);
+
     int b_msb = msb(b_cppint);
     int b_lsb = lsb(b_cppint);

diff --git a/libs/multiprecision/test/test_cpp_int_conv.cpp b/libs/multiprecision/test/test_cpp_int_conv.cpp
index 7b179914a3..19859e1e23 100644
--- a/libs/multiprecision/test/test_cpp_int_conv.cpp
+++ b/libs/multiprecision/test/test_cpp_int_conv.cpp
@@ -41,7 +41,7 @@ int main() {

    i3 = -1234567;

-   uint128_t i5(i3);
+   uint128_modular_t i5(i3);
    BOOST_TEST(i5 == -1234567);

    int128_t i6(i4);

diff --git a/libs/multiprecision/test/test_nothrow_cpp_int.cpp b/libs/multiprecision/test/test_nothrow_cpp_int.cpp
index 69002c09fb..e72a75474e 100644
--- a/libs/multiprecision/test/test_nothrow_cpp_int.cpp
+++ b/libs/multiprecision/test/test_nothrow_cpp_int.cpp
@@ -20,8 +20,8 @@
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
-BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
-BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
+BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
+BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
@@ -36,8 +36,8 @@
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_constructible::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
-BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
-BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
+BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
+BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
@@ -52,8 +52,8 @@
 BOOST_STATIC_ASSERT(boost::is_nothrow_move_assignable::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
-BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
-BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
+BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
+BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
@@ -66,8 +66,8 @@
 BOOST_STATIC_ASSERT(boost::has_nothrow_constructor::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
-BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
-BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
+BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
+BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
@@ -80,8 +80,8 @@
 BOOST_STATIC_ASSERT(boost::has_nothrow_copy::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
-BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
-BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
+BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
+BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
 BOOST_STATIC_ASSERT(boost::has_nothrow_assign::value);
@@ -97,9 +97,9 @@ BOOST_STATIC_ASSERT(
 BOOST_STATIC_ASSERT(noexcept(
     boost::multiprecision::checked_int128_t(std::declval())));
 BOOST_STATIC_ASSERT(
-    noexcept(boost::multiprecision::uint128_t(std::declval())));
+    noexcept(boost::multiprecision::uint128_modular_t(std::declval())));
 BOOST_STATIC_ASSERT(!noexcept(
-    boost::multiprecision::checked_uint128_t(std::declval())));
+    boost::multiprecision::checked_uint128_modular_t(std::declval())));
 BOOST_STATIC_ASSERT(
     noexcept(boost::multiprecision::int512_t(std::declval())));
 BOOST_STATIC_ASSERT(noexcept(
@@ -118,9 +118,9 @@ BOOST_STATIC_ASSERT(
 BOOST_STATIC_ASSERT(
     noexcept(boost::multiprecision::checked_int128_t(std::declval())));
 BOOST_STATIC_ASSERT(
-    noexcept(boost::multiprecision::uint128_t(std::declval())));
+    noexcept(boost::multiprecision::uint128_modular_t(std::declval())));
 BOOST_STATIC_ASSERT(
-    noexcept(boost::multiprecision::checked_uint128_t(std::declval())));
+    noexcept(boost::multiprecision::checked_uint128_modular_t(std::declval())));
 BOOST_STATIC_ASSERT(
     noexcept(boost::multiprecision::int512_t(std::declval())));
 BOOST_STATIC_ASSERT(
@@ -138,9 +138,9 @@
 BOOST_STATIC_ASSERT(noexcept(std::declval() = std::declval()));
 BOOST_STATIC_ASSERT(noexcept(std::declval() = std::declval()));
-BOOST_STATIC_ASSERT(noexcept(std::declval() =
+BOOST_STATIC_ASSERT(noexcept(std::declval() =
     std::declval()));
-BOOST_STATIC_ASSERT(!noexcept(std::declval() =
+BOOST_STATIC_ASSERT(!noexcept(std::declval() =
     std::declval()));
 BOOST_STATIC_ASSERT(noexcept(std::declval() =
     std::declval()));
@@ -160,8 +160,8 @@ BOOST_STATIC_ASSERT(noexcept(
 BOOST_STATIC_ASSERT(noexcept(std::declval() =
     std::declval()));
 BOOST_STATIC_ASSERT(noexcept(
-    std::declval() = std::declval()));
-BOOST_STATIC_ASSERT(noexcept(std::declval() =
+    std::declval() = std::declval()));
+BOOST_STATIC_ASSERT(noexcept(std::declval() =
     std::declval()));
 BOOST_STATIC_ASSERT(noexcept(
     std::declval() = std::declval()));

diff --git a/libs/multiprecision/test/test_nothrow_cpp_rational.cpp b/libs/multiprecision/test/test_nothrow_cpp_rational.cpp
index f3294550ff..c23b31b0f9 100644
--- a/libs/multiprecision/test/test_nothrow_cpp_rational.cpp
+++ b/libs/multiprecision/test/test_nothrow_cpp_rational.cpp
@@ -15,7 +15,7 @@ typedef boost::multiprecision::number<
     boost::multiprecision::rational_adaptor>
     rat128_t;
 typedef boost::multiprecision::number<
-    boost::multiprecision::rational_adaptor>
+    boost::multiprecision::rational_adaptor>
     urat128_t;
 typedef boost::multiprecision::number<
     boost::multiprecision::rational_adaptor>
@@ -28,7 +28,7 @@ typedef boost::multiprecision::number<
     boost::multiprecision::rational_adaptor>
     checked_rat128_t;
 typedef boost::multiprecision::number<
-    boost::multiprecision::rational_adaptor>
+    boost::multiprecision::rational_adaptor>
     checked_urat128_t;
 typedef boost::multiprecision::number<
     boost::multiprecision::rational_adaptor>