diff --git a/native/.gitignore b/native/.gitignore index b645839db..251a2aba2 100644 --- a/native/.gitignore +++ b/native/.gitignore @@ -18,6 +18,7 @@ DerivedData *.dSYM.zip *.xcuserstate project.xcworkspace +iosTest/Pods/boost-for-react-native # Android/IntelliJ # diff --git a/native/iosTest/Pods/DoubleConversion/LICENSE b/native/iosTest/Pods/DoubleConversion/LICENSE new file mode 100644 index 000000000..933718a9e --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/LICENSE @@ -0,0 +1,26 @@ +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/native/iosTest/Pods/DoubleConversion/README b/native/iosTest/Pods/DoubleConversion/README new file mode 100644 index 000000000..167f9c5e9 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/README @@ -0,0 +1,54 @@ +http://code.google.com/p/double-conversion + +This project (double-conversion) provides binary-decimal and decimal-binary +routines for IEEE doubles. + +The library consists of efficient conversion routines that have been extracted +from the V8 JavaScript engine. The code has been refactored and improved so that +it can be used more easily in other projects. + +There is extensive documentation in src/double-conversion.h. Other examples can +be found in test/cctest/test-conversions.cc. + + +Building +======== + +This library can be built with scons [0] or cmake [1]. +The checked-in Makefile simply forwards to scons, and provides a +shortcut to run all tests: + + make + make test + +Scons +----- + +The easiest way to install this library is to use `scons`. It builds +the static and shared library, and is set up to install those at the +correct locations: + + scons install + +Use the `DESTDIR` option to change the target directory: + + scons DESTDIR=alternative_directory install + +Cmake +----- + +To use cmake run `cmake .` in the root directory. This overwrites the +existing Makefile. + +Use `-DBUILD_SHARED_LIBS=ON` to enable the compilation of shared libraries. +Note that this disables static libraries. 
There is currently no way to +build both libraries at the same time with cmake. + +Use `-DBUILD_TESTING=ON` to build the test executable. + + cmake . -DBUILD_TESTING=ON + make + test/cctest/cctest --list | tr -d '<' | xargs test/cctest/cctest + +[0]: http://www.scons.org +[1]: http://www.cmake.org diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/bignum-dtoa.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum-dtoa.cc new file mode 100644 index 000000000..f1ad7a5ae --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum-dtoa.cc @@ -0,0 +1,641 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "bignum-dtoa.h" + +#include "bignum.h" +#include "ieee.h" + +namespace double_conversion { + +static int NormalizedExponent(uint64_t significand, int exponent) { + ASSERT(significand != 0); + while ((significand & Double::kHiddenBit) == 0) { + significand = significand << 1; + exponent = exponent - 1; + } + return exponent; +} + + +// Forward declarations: +// Returns an estimation of k such that 10^(k-1) <= v < 10^k. +static int EstimatePower(int exponent); +// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator +// and denominator. +static void InitialScaledStartValues(uint64_t significand, + int exponent, + bool lower_boundary_is_closer, + int estimated_power, + bool need_boundary_deltas, + Bignum* numerator, + Bignum* denominator, + Bignum* delta_minus, + Bignum* delta_plus); +// Multiplies numerator/denominator so that its values lies in the range 1-10. +// Returns decimal_point s.t. +// v = numerator'/denominator' * 10^(decimal_point-1) +// where numerator' and denominator' are the values of numerator and +// denominator after the call to this function. +static void FixupMultiply10(int estimated_power, bool is_even, + int* decimal_point, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus); +// Generates digits from the left to the right and stops when the generated +// digits yield the shortest decimal representation of v. 
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus, + bool is_even, + Vector buffer, int* length); +// Generates 'requested_digits' after the decimal point. +static void BignumToFixed(int requested_digits, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length); +// Generates 'count' digits of numerator/denominator. +// Once 'count' digits have been produced rounds the result depending on the +// remainder (remainders of exactly .5 round upwards). Might update the +// decimal_point when rounding up (for example for 0.9999). +static void GenerateCountedDigits(int count, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length); + + +void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, + Vector buffer, int* length, int* decimal_point) { + ASSERT(v > 0); + ASSERT(!Double(v).IsSpecial()); + uint64_t significand; + int exponent; + bool lower_boundary_is_closer; + if (mode == BIGNUM_DTOA_SHORTEST_SINGLE) { + float f = static_cast(v); + ASSERT(f == v); + significand = Single(f).Significand(); + exponent = Single(f).Exponent(); + lower_boundary_is_closer = Single(f).LowerBoundaryIsCloser(); + } else { + significand = Double(v).Significand(); + exponent = Double(v).Exponent(); + lower_boundary_is_closer = Double(v).LowerBoundaryIsCloser(); + } + bool need_boundary_deltas = + (mode == BIGNUM_DTOA_SHORTEST || mode == BIGNUM_DTOA_SHORTEST_SINGLE); + + bool is_even = (significand & 1) == 0; + int normalized_exponent = NormalizedExponent(significand, exponent); + // estimated_power might be too low by 1. + int estimated_power = EstimatePower(normalized_exponent); + + // Shortcut for Fixed. + // The requested digits correspond to the digits after the point. If the + // number is much too small, then there is no need in trying to get any + // digits. 
+ if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) { + buffer[0] = '\0'; + *length = 0; + // Set decimal-point to -requested_digits. This is what Gay does. + // Note that it should not have any effect anyways since the string is + // empty. + *decimal_point = -requested_digits; + return; + } + + Bignum numerator; + Bignum denominator; + Bignum delta_minus; + Bignum delta_plus; + // Make sure the bignum can grow large enough. The smallest double equals + // 4e-324. In this case the denominator needs fewer than 324*4 binary digits. + // The maximum double is 1.7976931348623157e308 which needs fewer than + // 308*4 binary digits. + ASSERT(Bignum::kMaxSignificantBits >= 324*4); + InitialScaledStartValues(significand, exponent, lower_boundary_is_closer, + estimated_power, need_boundary_deltas, + &numerator, &denominator, + &delta_minus, &delta_plus); + // We now have v = (numerator / denominator) * 10^estimated_power. + FixupMultiply10(estimated_power, is_even, decimal_point, + &numerator, &denominator, + &delta_minus, &delta_plus); + // We now have v = (numerator / denominator) * 10^(decimal_point-1), and + // 1 <= (numerator + delta_plus) / denominator < 10 + switch (mode) { + case BIGNUM_DTOA_SHORTEST: + case BIGNUM_DTOA_SHORTEST_SINGLE: + GenerateShortestDigits(&numerator, &denominator, + &delta_minus, &delta_plus, + is_even, buffer, length); + break; + case BIGNUM_DTOA_FIXED: + BignumToFixed(requested_digits, decimal_point, + &numerator, &denominator, + buffer, length); + break; + case BIGNUM_DTOA_PRECISION: + GenerateCountedDigits(requested_digits, decimal_point, + &numerator, &denominator, + buffer, length); + break; + default: + UNREACHABLE(); + } + buffer[*length] = '\0'; +} + + +// The procedure starts generating digits from the left to the right and stops +// when the generated digits yield the shortest decimal representation of v. 
A +// decimal representation of v is a number lying closer to v than to any other +// double, so it converts to v when read. +// +// This is true if d, the decimal representation, is between m- and m+, the +// upper and lower boundaries. d must be strictly between them if !is_even. +// m- := (numerator - delta_minus) / denominator +// m+ := (numerator + delta_plus) / denominator +// +// Precondition: 0 <= (numerator+delta_plus) / denominator < 10. +// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit +// will be produced. This should be the standard precondition. +static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus, + bool is_even, + Vector buffer, int* length) { + // Small optimization: if delta_minus and delta_plus are the same just reuse + // one of the two bignums. + if (Bignum::Equal(*delta_minus, *delta_plus)) { + delta_plus = delta_minus; + } + *length = 0; + for (;;) { + uint16_t digit; + digit = numerator->DivideModuloIntBignum(*denominator); + ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive. + // digit = numerator / denominator (integer division). + // numerator = numerator % denominator. + buffer[(*length)++] = static_cast(digit + '0'); + + // Can we stop already? + // If the remainder of the division is less than the distance to the lower + // boundary we can stop. In this case we simply round down (discarding the + // remainder). + // Similarly we test if we can round up (using the upper boundary). 
+ bool in_delta_room_minus; + bool in_delta_room_plus; + if (is_even) { + in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus); + } else { + in_delta_room_minus = Bignum::Less(*numerator, *delta_minus); + } + if (is_even) { + in_delta_room_plus = + Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0; + } else { + in_delta_room_plus = + Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0; + } + if (!in_delta_room_minus && !in_delta_room_plus) { + // Prepare for next iteration. + numerator->Times10(); + delta_minus->Times10(); + // We optimized delta_plus to be equal to delta_minus (if they share the + // same value). So don't multiply delta_plus if they point to the same + // object. + if (delta_minus != delta_plus) { + delta_plus->Times10(); + } + } else if (in_delta_room_minus && in_delta_room_plus) { + // Let's see if 2*numerator < denominator. + // If yes, then the next digit would be < 5 and we can round down. + int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator); + if (compare < 0) { + // Remaining digits are less than .5. -> Round down (== do nothing). + } else if (compare > 0) { + // Remaining digits are more than .5 of denominator. -> Round up. + // Note that the last digit could not be a '9' as otherwise the whole + // loop would have stopped earlier. + // We still have an assert here in case the preconditions were not + // satisfied. + ASSERT(buffer[(*length) - 1] != '9'); + buffer[(*length) - 1]++; + } else { + // Halfway case. + // TODO(floitsch): need a way to solve half-way cases. + // For now let's round towards even (since this is what Gay seems to + // do). + + if ((buffer[(*length) - 1] - '0') % 2 == 0) { + // Round down => Do nothing. + } else { + ASSERT(buffer[(*length) - 1] != '9'); + buffer[(*length) - 1]++; + } + } + return; + } else if (in_delta_room_minus) { + // Round down (== do nothing). + return; + } else { // in_delta_room_plus + // Round up. 
+ // Note again that the last digit could not be '9' since this would have + // stopped the loop earlier. + // We still have an ASSERT here, in case the preconditions were not + // satisfied. + ASSERT(buffer[(*length) -1] != '9'); + buffer[(*length) - 1]++; + return; + } + } +} + + +// Let v = numerator / denominator < 10. +// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point) +// from left to right. Once 'count' digits have been produced we decide wether +// to round up or down. Remainders of exactly .5 round upwards. Numbers such +// as 9.999999 propagate a carry all the way, and change the +// exponent (decimal_point), when rounding upwards. +static void GenerateCountedDigits(int count, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector buffer, int* length) { + ASSERT(count >= 0); + for (int i = 0; i < count - 1; ++i) { + uint16_t digit; + digit = numerator->DivideModuloIntBignum(*denominator); + ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive. + // digit = numerator / denominator (integer division). + // numerator = numerator % denominator. + buffer[i] = static_cast(digit + '0'); + // Prepare for next iteration. + numerator->Times10(); + } + // Generate the last digit. + uint16_t digit; + digit = numerator->DivideModuloIntBignum(*denominator); + if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) { + digit++; + } + ASSERT(digit <= 10); + buffer[count - 1] = static_cast(digit + '0'); + // Correct bad digits (in case we had a sequence of '9's). Propagate the + // carry until we hat a non-'9' or til we reach the first digit. + for (int i = count - 1; i > 0; --i) { + if (buffer[i] != '0' + 10) break; + buffer[i] = '0'; + buffer[i - 1]++; + } + if (buffer[0] == '0' + 10) { + // Propagate a carry past the top place. + buffer[0] = '1'; + (*decimal_point)++; + } + *length = count; +} + + +// Generates 'requested_digits' after the decimal point. It might omit +// trailing '0's. 
If the input number is too small then no digits at all are +// generated (ex.: 2 fixed digits for 0.00001). +// +// Input verifies: 1 <= (numerator + delta) / denominator < 10. +static void BignumToFixed(int requested_digits, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length) { + // Note that we have to look at more than just the requested_digits, since + // a number could be rounded up. Example: v=0.5 with requested_digits=0. + // Even though the power of v equals 0 we can't just stop here. + if (-(*decimal_point) > requested_digits) { + // The number is definitively too small. + // Ex: 0.001 with requested_digits == 1. + // Set decimal-point to -requested_digits. This is what Gay does. + // Note that it should not have any effect anyways since the string is + // empty. + *decimal_point = -requested_digits; + *length = 0; + return; + } else if (-(*decimal_point) == requested_digits) { + // We only need to verify if the number rounds down or up. + // Ex: 0.04 and 0.06 with requested_digits == 1. + ASSERT(*decimal_point == -requested_digits); + // Initially the fraction lies in range (1, 10]. Multiply the denominator + // by 10 so that we can compare more easily. + denominator->Times10(); + if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) { + // If the fraction is >= 0.5 then we have to include the rounded + // digit. + buffer[0] = '1'; + *length = 1; + (*decimal_point)++; + } else { + // Note that we caught most of similar cases earlier. + *length = 0; + } + return; + } else { + // The requested digits correspond to the digits after the point. + // The variable 'needed_digits' includes the digits before the point. + int needed_digits = (*decimal_point) + requested_digits; + GenerateCountedDigits(needed_digits, decimal_point, + numerator, denominator, + buffer, length); + } +} + + +// Returns an estimation of k such that 10^(k-1) <= v < 10^k where +// v = f * 2^exponent and 2^52 <= f < 2^53. 
+// v is hence a normalized double with the given exponent. The output is an +// approximation for the exponent of the decimal approimation .digits * 10^k. +// +// The result might undershoot by 1 in which case 10^k <= v < 10^k+1. +// Note: this property holds for v's upper boundary m+ too. +// 10^k <= m+ < 10^k+1. +// (see explanation below). +// +// Examples: +// EstimatePower(0) => 16 +// EstimatePower(-52) => 0 +// +// Note: e >= 0 => EstimatedPower(e) > 0. No similar claim can be made for e<0. +static int EstimatePower(int exponent) { + // This function estimates log10 of v where v = f*2^e (with e == exponent). + // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)). + // Note that f is bounded by its container size. Let p = 53 (the double's + // significand size). Then 2^(p-1) <= f < 2^p. + // + // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close + // to log2(v) the function is simplified to (e+(len(f)-1)/log2(10)). + // The computed number undershoots by less than 0.631 (when we compute log3 + // and not log10). + // + // Optimization: since we only need an approximated result this computation + // can be performed on 64 bit integers. On x86/x64 architecture the speedup is + // not really measurable, though. + // + // Since we want to avoid overshooting we decrement by 1e10 so that + // floating-point imprecisions don't affect us. + // + // Explanation for v's boundary m+: the computation takes advantage of + // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement + // (even for denormals where the delta can be much more important). + + const double k1Log10 = 0.30102999566398114; // 1/lg(10) + + // For doubles len(f) == 53 (don't forget the hidden bit). + const int kSignificandSize = Double::kSignificandSize; + double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10); + return static_cast(estimate); +} + + +// See comments for InitialScaledStartValues. 
+static void InitialScaledStartValuesPositiveExponent( + uint64_t significand, int exponent, + int estimated_power, bool need_boundary_deltas, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + // A positive exponent implies a positive power. + ASSERT(estimated_power >= 0); + // Since the estimated_power is positive we simply multiply the denominator + // by 10^estimated_power. + + // numerator = v. + numerator->AssignUInt64(significand); + numerator->ShiftLeft(exponent); + // denominator = 10^estimated_power. + denominator->AssignPowerUInt16(10, estimated_power); + + if (need_boundary_deltas) { + // Introduce a common denominator so that the deltas to the boundaries are + // integers. + denominator->ShiftLeft(1); + numerator->ShiftLeft(1); + // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common + // denominator (of 2) delta_plus equals 2^e. + delta_plus->AssignUInt16(1); + delta_plus->ShiftLeft(exponent); + // Same for delta_minus. The adjustments if f == 2^p-1 are done later. + delta_minus->AssignUInt16(1); + delta_minus->ShiftLeft(exponent); + } +} + + +// See comments for InitialScaledStartValues +static void InitialScaledStartValuesNegativeExponentPositivePower( + uint64_t significand, int exponent, + int estimated_power, bool need_boundary_deltas, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + // v = f * 2^e with e < 0, and with estimated_power >= 0. + // This means that e is close to 0 (have a look at how estimated_power is + // computed). 
+ + // numerator = significand + // since v = significand * 2^exponent this is equivalent to + // numerator = v * / 2^-exponent + numerator->AssignUInt64(significand); + // denominator = 10^estimated_power * 2^-exponent (with exponent < 0) + denominator->AssignPowerUInt16(10, estimated_power); + denominator->ShiftLeft(-exponent); + + if (need_boundary_deltas) { + // Introduce a common denominator so that the deltas to the boundaries are + // integers. + denominator->ShiftLeft(1); + numerator->ShiftLeft(1); + // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common + // denominator (of 2) delta_plus equals 2^e. + // Given that the denominator already includes v's exponent the distance + // to the boundaries is simply 1. + delta_plus->AssignUInt16(1); + // Same for delta_minus. The adjustments if f == 2^p-1 are done later. + delta_minus->AssignUInt16(1); + } +} + + +// See comments for InitialScaledStartValues +static void InitialScaledStartValuesNegativeExponentNegativePower( + uint64_t significand, int exponent, + int estimated_power, bool need_boundary_deltas, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + // Instead of multiplying the denominator with 10^estimated_power we + // multiply all values (numerator and deltas) by 10^-estimated_power. + + // Use numerator as temporary container for power_ten. + Bignum* power_ten = numerator; + power_ten->AssignPowerUInt16(10, -estimated_power); + + if (need_boundary_deltas) { + // Since power_ten == numerator we must make a copy of 10^estimated_power + // before we complete the computation of the numerator. + // delta_plus = delta_minus = 10^estimated_power + delta_plus->AssignBignum(*power_ten); + delta_minus->AssignBignum(*power_ten); + } + + // numerator = significand * 2 * 10^-estimated_power + // since v = significand * 2^exponent this is equivalent to + // numerator = v * 10^-estimated_power * 2 * 2^-exponent. + // Remember: numerator has been abused as power_ten. 
So no need to assign it + // to itself. + ASSERT(numerator == power_ten); + numerator->MultiplyByUInt64(significand); + + // denominator = 2 * 2^-exponent with exponent < 0. + denominator->AssignUInt16(1); + denominator->ShiftLeft(-exponent); + + if (need_boundary_deltas) { + // Introduce a common denominator so that the deltas to the boundaries are + // integers. + numerator->ShiftLeft(1); + denominator->ShiftLeft(1); + // With this shift the boundaries have their correct value, since + // delta_plus = 10^-estimated_power, and + // delta_minus = 10^-estimated_power. + // These assignments have been done earlier. + // The adjustments if f == 2^p-1 (lower boundary is closer) are done later. + } +} + + +// Let v = significand * 2^exponent. +// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator +// and denominator. The functions GenerateShortestDigits and +// GenerateCountedDigits will then convert this ratio to its decimal +// representation d, with the required accuracy. +// Then d * 10^estimated_power is the representation of v. +// (Note: the fraction and the estimated_power might get adjusted before +// generating the decimal representation.) +// +// The initial start values consist of: +// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power. +// - a scaled (common) denominator. +// optionally (used by GenerateShortestDigits to decide if it has the shortest +// decimal converting back to v): +// - v - m-: the distance to the lower boundary. +// - m+ - v: the distance to the upper boundary. +// +// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator. +// +// Let ep == estimated_power, then the returned values will satisfy: +// v / 10^ep = numerator / denominator. 
+// v's boundarys m- and m+: +// m- / 10^ep == v / 10^ep - delta_minus / denominator +// m+ / 10^ep == v / 10^ep + delta_plus / denominator +// Or in other words: +// m- == v - delta_minus * 10^ep / denominator; +// m+ == v + delta_plus * 10^ep / denominator; +// +// Since 10^(k-1) <= v < 10^k (with k == estimated_power) +// or 10^k <= v < 10^(k+1) +// we then have 0.1 <= numerator/denominator < 1 +// or 1 <= numerator/denominator < 10 +// +// It is then easy to kickstart the digit-generation routine. +// +// The boundary-deltas are only filled if the mode equals BIGNUM_DTOA_SHORTEST +// or BIGNUM_DTOA_SHORTEST_SINGLE. + +static void InitialScaledStartValues(uint64_t significand, + int exponent, + bool lower_boundary_is_closer, + int estimated_power, + bool need_boundary_deltas, + Bignum* numerator, + Bignum* denominator, + Bignum* delta_minus, + Bignum* delta_plus) { + if (exponent >= 0) { + InitialScaledStartValuesPositiveExponent( + significand, exponent, estimated_power, need_boundary_deltas, + numerator, denominator, delta_minus, delta_plus); + } else if (estimated_power >= 0) { + InitialScaledStartValuesNegativeExponentPositivePower( + significand, exponent, estimated_power, need_boundary_deltas, + numerator, denominator, delta_minus, delta_plus); + } else { + InitialScaledStartValuesNegativeExponentNegativePower( + significand, exponent, estimated_power, need_boundary_deltas, + numerator, denominator, delta_minus, delta_plus); + } + + if (need_boundary_deltas && lower_boundary_is_closer) { + // The lower boundary is closer at half the distance of "normal" numbers. + // Increase the common denominator and adapt all but the delta_minus. + denominator->ShiftLeft(1); // *2 + numerator->ShiftLeft(1); // *2 + delta_plus->ShiftLeft(1); // *2 + } +} + + +// This routine multiplies numerator/denominator so that its values lies in the +// range 1-10. That is after a call to this function we have: +// 1 <= (numerator + delta_plus) /denominator < 10. 
+// Let numerator the input before modification and numerator' the argument +// after modification, then the output-parameter decimal_point is such that +// numerator / denominator * 10^estimated_power == +// numerator' / denominator' * 10^(decimal_point - 1) +// In some cases estimated_power was too low, and this is already the case. We +// then simply adjust the power so that 10^(k-1) <= v < 10^k (with k == +// estimated_power) but do not touch the numerator or denominator. +// Otherwise the routine multiplies the numerator and the deltas by 10. +static void FixupMultiply10(int estimated_power, bool is_even, + int* decimal_point, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + bool in_range; + if (is_even) { + // For IEEE doubles half-way cases (in decimal system numbers ending with 5) + // are rounded to the closest floating-point number with even significand. + in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0; + } else { + in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0; + } + if (in_range) { + // Since numerator + delta_plus >= denominator we already have + // 1 <= numerator/denominator < 10. Simply update the estimated_power. + *decimal_point = estimated_power + 1; + } else { + *decimal_point = estimated_power; + numerator->Times10(); + if (Bignum::Equal(*delta_minus, *delta_plus)) { + delta_minus->Times10(); + delta_plus->AssignBignum(*delta_minus); + } else { + delta_minus->Times10(); + delta_plus->Times10(); + } + } +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/bignum-dtoa.h b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum-dtoa.h new file mode 100644 index 000000000..34b961992 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum-dtoa.h @@ -0,0 +1,84 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_ +#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_ + +#include "utils.h" + +namespace double_conversion { + +enum BignumDtoaMode { + // Return the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate but + // correct) 0.3. + BIGNUM_DTOA_SHORTEST, + // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats. 
+ BIGNUM_DTOA_SHORTEST_SINGLE, + // Return a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + BIGNUM_DTOA_FIXED, + // Return a fixed number of digits, no matter what the exponent is. + BIGNUM_DTOA_PRECISION +}; + +// Converts the given double 'v' to ascii. +// The result should be interpreted as buffer * 10^(point-length). +// The buffer will be null-terminated. +// +// The input v must be > 0 and different from NaN, and Infinity. +// +// The output depends on the given mode: +// - SHORTEST: produce the least amount of digits for which the internal +// identity requirement is still satisfied. If the digits are printed +// (together with the correct exponent) then reading this number will give +// 'v' again. The buffer will choose the representation that is closest to +// 'v'. If there are two at the same distance, than the number is round up. +// In this mode the 'requested_digits' parameter is ignored. +// - FIXED: produces digits necessary to print a given number with +// 'requested_digits' digits after the decimal point. The produced digits +// might be too short in which case the caller has to fill the gaps with '0's. +// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. +// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns +// buffer="2", point=0. +// Note: the length of the returned buffer has no meaning wrt the significance +// of its digits. That is, just because it contains '0's does not mean that +// any other digit would not satisfy the internal identity requirement. +// - PRECISION: produces 'requested_digits' where the first digit is not '0'. +// Even though the length of produced digits usually equals +// 'requested_digits', the function is allowed to return fewer digits, in +// which case the caller has to fill the missing digits with '0's. +// Halfway cases are again rounded up. 
+// 'BignumDtoa' expects the given buffer to be big enough to hold all digits +// and a terminating null-character. +void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, + Vector buffer, int* length, int* point); + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/bignum.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum.cc new file mode 100644 index 000000000..2743d67e8 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum.cc @@ -0,0 +1,766 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "bignum.h" +#include "utils.h" + +namespace double_conversion { + +Bignum::Bignum() + : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) { + for (int i = 0; i < kBigitCapacity; ++i) { + bigits_[i] = 0; + } +} + + +template +static int BitSize(S value) { + (void) value; // Mark variable as used. + return 8 * sizeof(value); +} + +// Guaranteed to lie in one Bigit. +void Bignum::AssignUInt16(uint16_t value) { + ASSERT(kBigitSize >= BitSize(value)); + Zero(); + if (value == 0) return; + + EnsureCapacity(1); + bigits_[0] = value; + used_digits_ = 1; +} + + +void Bignum::AssignUInt64(uint64_t value) { + const int kUInt64Size = 64; + + Zero(); + if (value == 0) return; + + int needed_bigits = kUInt64Size / kBigitSize + 1; + EnsureCapacity(needed_bigits); + for (int i = 0; i < needed_bigits; ++i) { + bigits_[i] = value & kBigitMask; + value = value >> kBigitSize; + } + used_digits_ = needed_bigits; + Clamp(); +} + + +void Bignum::AssignBignum(const Bignum& other) { + exponent_ = other.exponent_; + for (int i = 0; i < other.used_digits_; ++i) { + bigits_[i] = other.bigits_[i]; + } + // Clear the excess digits (if there were any). 
+ for (int i = other.used_digits_; i < used_digits_; ++i) { + bigits_[i] = 0; + } + used_digits_ = other.used_digits_; +} + + +static uint64_t ReadUInt64(Vector buffer, + int from, + int digits_to_read) { + uint64_t result = 0; + for (int i = from; i < from + digits_to_read; ++i) { + int digit = buffer[i] - '0'; + ASSERT(0 <= digit && digit <= 9); + result = result * 10 + digit; + } + return result; +} + + +void Bignum::AssignDecimalString(Vector value) { + // 2^64 = 18446744073709551616 > 10^19 + const int kMaxUint64DecimalDigits = 19; + Zero(); + int length = value.length(); + int pos = 0; + // Let's just say that each digit needs 4 bits. + while (length >= kMaxUint64DecimalDigits) { + uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits); + pos += kMaxUint64DecimalDigits; + length -= kMaxUint64DecimalDigits; + MultiplyByPowerOfTen(kMaxUint64DecimalDigits); + AddUInt64(digits); + } + uint64_t digits = ReadUInt64(value, pos, length); + MultiplyByPowerOfTen(length); + AddUInt64(digits); + Clamp(); +} + + +static int HexCharValue(char c) { + if ('0' <= c && c <= '9') return c - '0'; + if ('a' <= c && c <= 'f') return 10 + c - 'a'; + ASSERT('A' <= c && c <= 'F'); + return 10 + c - 'A'; +} + + +void Bignum::AssignHexString(Vector value) { + Zero(); + int length = value.length(); + + int needed_bigits = length * 4 / kBigitSize + 1; + EnsureCapacity(needed_bigits); + int string_index = length - 1; + for (int i = 0; i < needed_bigits - 1; ++i) { + // These bigits are guaranteed to be "full". 
+ Chunk current_bigit = 0; + for (int j = 0; j < kBigitSize / 4; j++) { + current_bigit += HexCharValue(value[string_index--]) << (j * 4); + } + bigits_[i] = current_bigit; + } + used_digits_ = needed_bigits - 1; + + Chunk most_significant_bigit = 0; // Could be = 0; + for (int j = 0; j <= string_index; ++j) { + most_significant_bigit <<= 4; + most_significant_bigit += HexCharValue(value[j]); + } + if (most_significant_bigit != 0) { + bigits_[used_digits_] = most_significant_bigit; + used_digits_++; + } + Clamp(); +} + + +void Bignum::AddUInt64(uint64_t operand) { + if (operand == 0) return; + Bignum other; + other.AssignUInt64(operand); + AddBignum(other); +} + + +void Bignum::AddBignum(const Bignum& other) { + ASSERT(IsClamped()); + ASSERT(other.IsClamped()); + + // If this has a greater exponent than other append zero-bigits to this. + // After this call exponent_ <= other.exponent_. + Align(other); + + // There are two possibilities: + // aaaaaaaaaaa 0000 (where the 0s represent a's exponent) + // bbbbb 00000000 + // ---------------- + // ccccccccccc 0000 + // or + // aaaaaaaaaa 0000 + // bbbbbbbbb 0000000 + // ----------------- + // cccccccccccc 0000 + // In both cases we might need a carry bigit. + + EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_); + Chunk carry = 0; + int bigit_pos = other.exponent_ - exponent_; + ASSERT(bigit_pos >= 0); + for (int i = 0; i < other.used_digits_; ++i) { + Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry; + bigits_[bigit_pos] = sum & kBigitMask; + carry = sum >> kBigitSize; + bigit_pos++; + } + + while (carry != 0) { + Chunk sum = bigits_[bigit_pos] + carry; + bigits_[bigit_pos] = sum & kBigitMask; + carry = sum >> kBigitSize; + bigit_pos++; + } + used_digits_ = Max(bigit_pos, used_digits_); + ASSERT(IsClamped()); +} + + +void Bignum::SubtractBignum(const Bignum& other) { + ASSERT(IsClamped()); + ASSERT(other.IsClamped()); + // We require this to be bigger than other. 
+ ASSERT(LessEqual(other, *this)); + + Align(other); + + int offset = other.exponent_ - exponent_; + Chunk borrow = 0; + int i; + for (i = 0; i < other.used_digits_; ++i) { + ASSERT((borrow == 0) || (borrow == 1)); + Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow; + bigits_[i + offset] = difference & kBigitMask; + borrow = difference >> (kChunkSize - 1); + } + while (borrow != 0) { + Chunk difference = bigits_[i + offset] - borrow; + bigits_[i + offset] = difference & kBigitMask; + borrow = difference >> (kChunkSize - 1); + ++i; + } + Clamp(); +} + + +void Bignum::ShiftLeft(int shift_amount) { + if (used_digits_ == 0) return; + exponent_ += shift_amount / kBigitSize; + int local_shift = shift_amount % kBigitSize; + EnsureCapacity(used_digits_ + 1); + BigitsShiftLeft(local_shift); +} + + +void Bignum::MultiplyByUInt32(uint32_t factor) { + if (factor == 1) return; + if (factor == 0) { + Zero(); + return; + } + if (used_digits_ == 0) return; + + // The product of a bigit with the factor is of size kBigitSize + 32. + // Assert that this number + 1 (for the carry) fits into double chunk. 
+ ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1); + DoubleChunk carry = 0; + for (int i = 0; i < used_digits_; ++i) { + DoubleChunk product = static_cast(factor) * bigits_[i] + carry; + bigits_[i] = static_cast(product & kBigitMask); + carry = (product >> kBigitSize); + } + while (carry != 0) { + EnsureCapacity(used_digits_ + 1); + bigits_[used_digits_] = carry & kBigitMask; + used_digits_++; + carry >>= kBigitSize; + } +} + + +void Bignum::MultiplyByUInt64(uint64_t factor) { + if (factor == 1) return; + if (factor == 0) { + Zero(); + return; + } + ASSERT(kBigitSize < 32); + uint64_t carry = 0; + uint64_t low = factor & 0xFFFFFFFF; + uint64_t high = factor >> 32; + for (int i = 0; i < used_digits_; ++i) { + uint64_t product_low = low * bigits_[i]; + uint64_t product_high = high * bigits_[i]; + uint64_t tmp = (carry & kBigitMask) + product_low; + bigits_[i] = tmp & kBigitMask; + carry = (carry >> kBigitSize) + (tmp >> kBigitSize) + + (product_high << (32 - kBigitSize)); + } + while (carry != 0) { + EnsureCapacity(used_digits_ + 1); + bigits_[used_digits_] = carry & kBigitMask; + used_digits_++; + carry >>= kBigitSize; + } +} + + +void Bignum::MultiplyByPowerOfTen(int exponent) { + const uint64_t kFive27 = UINT64_2PART_C(0x6765c793, fa10079d); + const uint16_t kFive1 = 5; + const uint16_t kFive2 = kFive1 * 5; + const uint16_t kFive3 = kFive2 * 5; + const uint16_t kFive4 = kFive3 * 5; + const uint16_t kFive5 = kFive4 * 5; + const uint16_t kFive6 = kFive5 * 5; + const uint32_t kFive7 = kFive6 * 5; + const uint32_t kFive8 = kFive7 * 5; + const uint32_t kFive9 = kFive8 * 5; + const uint32_t kFive10 = kFive9 * 5; + const uint32_t kFive11 = kFive10 * 5; + const uint32_t kFive12 = kFive11 * 5; + const uint32_t kFive13 = kFive12 * 5; + const uint32_t kFive1_to_12[] = + { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6, + kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 }; + + ASSERT(exponent >= 0); + if (exponent == 0) return; + if (used_digits_ == 0) return; + + // We 
shift by exponent at the end just before returning. + int remaining_exponent = exponent; + while (remaining_exponent >= 27) { + MultiplyByUInt64(kFive27); + remaining_exponent -= 27; + } + while (remaining_exponent >= 13) { + MultiplyByUInt32(kFive13); + remaining_exponent -= 13; + } + if (remaining_exponent > 0) { + MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]); + } + ShiftLeft(exponent); +} + + +void Bignum::Square() { + ASSERT(IsClamped()); + int product_length = 2 * used_digits_; + EnsureCapacity(product_length); + + // Comba multiplication: compute each column separately. + // Example: r = a2a1a0 * b2b1b0. + // r = 1 * a0b0 + + // 10 * (a1b0 + a0b1) + + // 100 * (a2b0 + a1b1 + a0b2) + + // 1000 * (a2b1 + a1b2) + + // 10000 * a2b2 + // + // In the worst case we have to accumulate nb-digits products of digit*digit. + // + // Assert that the additional number of bits in a DoubleChunk are enough to + // sum up used_digits of Bigit*Bigit. + if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) { + UNIMPLEMENTED(); + } + DoubleChunk accumulator = 0; + // First shift the digits so we don't overwrite them. + int copy_offset = used_digits_; + for (int i = 0; i < used_digits_; ++i) { + bigits_[copy_offset + i] = bigits_[i]; + } + // We have two loops to avoid some 'if's in the loop. + for (int i = 0; i < used_digits_; ++i) { + // Process temporary digit i with power i. + // The sum of the two indices must be equal to i. + int bigit_index1 = i; + int bigit_index2 = 0; + // Sum all of the sub-products. 
+ while (bigit_index1 >= 0) { + Chunk chunk1 = bigits_[copy_offset + bigit_index1]; + Chunk chunk2 = bigits_[copy_offset + bigit_index2]; + accumulator += static_cast(chunk1) * chunk2; + bigit_index1--; + bigit_index2++; + } + bigits_[i] = static_cast(accumulator) & kBigitMask; + accumulator >>= kBigitSize; + } + for (int i = used_digits_; i < product_length; ++i) { + int bigit_index1 = used_digits_ - 1; + int bigit_index2 = i - bigit_index1; + // Invariant: sum of both indices is again equal to i. + // Inner loop runs 0 times on last iteration, emptying accumulator. + while (bigit_index2 < used_digits_) { + Chunk chunk1 = bigits_[copy_offset + bigit_index1]; + Chunk chunk2 = bigits_[copy_offset + bigit_index2]; + accumulator += static_cast(chunk1) * chunk2; + bigit_index1--; + bigit_index2++; + } + // The overwritten bigits_[i] will never be read in further loop iterations, + // because bigit_index1 and bigit_index2 are always greater + // than i - used_digits_. + bigits_[i] = static_cast(accumulator) & kBigitMask; + accumulator >>= kBigitSize; + } + // Since the result was guaranteed to lie inside the number the + // accumulator must be 0 now. + ASSERT(accumulator == 0); + + // Don't forget to update the used_digits and the exponent. + used_digits_ = product_length; + exponent_ *= 2; + Clamp(); +} + + +void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) { + ASSERT(base != 0); + ASSERT(power_exponent >= 0); + if (power_exponent == 0) { + AssignUInt16(1); + return; + } + Zero(); + int shifts = 0; + // We expect base to be in range 2-32, and most often to be 10. + // It does not make much sense to implement different algorithms for counting + // the bits. + while ((base & 1) == 0) { + base >>= 1; + shifts++; + } + int bit_size = 0; + int tmp_base = base; + while (tmp_base != 0) { + tmp_base >>= 1; + bit_size++; + } + int final_size = bit_size * power_exponent; + // 1 extra bigit for the shifting, and one for rounded final_size. 
+ EnsureCapacity(final_size / kBigitSize + 2); + + // Left to Right exponentiation. + int mask = 1; + while (power_exponent >= mask) mask <<= 1; + + // The mask is now pointing to the bit above the most significant 1-bit of + // power_exponent. + // Get rid of first 1-bit; + mask >>= 2; + uint64_t this_value = base; + + bool delayed_multipliciation = false; + const uint64_t max_32bits = 0xFFFFFFFF; + while (mask != 0 && this_value <= max_32bits) { + this_value = this_value * this_value; + // Verify that there is enough space in this_value to perform the + // multiplication. The first bit_size bits must be 0. + if ((power_exponent & mask) != 0) { + uint64_t base_bits_mask = + ~((static_cast(1) << (64 - bit_size)) - 1); + bool high_bits_zero = (this_value & base_bits_mask) == 0; + if (high_bits_zero) { + this_value *= base; + } else { + delayed_multipliciation = true; + } + } + mask >>= 1; + } + AssignUInt64(this_value); + if (delayed_multipliciation) { + MultiplyByUInt32(base); + } + + // Now do the same thing as a bignum. + while (mask != 0) { + Square(); + if ((power_exponent & mask) != 0) { + MultiplyByUInt32(base); + } + mask >>= 1; + } + + // And finally add the saved shifts. + ShiftLeft(shifts * power_exponent); +} + + +// Precondition: this/other < 16bit. +uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) { + ASSERT(IsClamped()); + ASSERT(other.IsClamped()); + ASSERT(other.used_digits_ > 0); + + // Easy case: if we have less digits than the divisor than the result is 0. + // Note: this handles the case where this == 0, too. + if (BigitLength() < other.BigitLength()) { + return 0; + } + + Align(other); + + uint16_t result = 0; + + // Start by removing multiples of 'other' until both numbers have the same + // number of digits. + while (BigitLength() > other.BigitLength()) { + // This naive approach is extremely inefficient if `this` divided by other + // is big. 
This function is implemented for doubleToString where + // the result should be small (less than 10). + ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16)); + ASSERT(bigits_[used_digits_ - 1] < 0x10000); + // Remove the multiples of the first digit. + // Example this = 23 and other equals 9. -> Remove 2 multiples. + result += static_cast(bigits_[used_digits_ - 1]); + SubtractTimes(other, bigits_[used_digits_ - 1]); + } + + ASSERT(BigitLength() == other.BigitLength()); + + // Both bignums are at the same length now. + // Since other has more than 0 digits we know that the access to + // bigits_[used_digits_ - 1] is safe. + Chunk this_bigit = bigits_[used_digits_ - 1]; + Chunk other_bigit = other.bigits_[other.used_digits_ - 1]; + + if (other.used_digits_ == 1) { + // Shortcut for easy (and common) case. + int quotient = this_bigit / other_bigit; + bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient; + ASSERT(quotient < 0x10000); + result += static_cast(quotient); + Clamp(); + return result; + } + + int division_estimate = this_bigit / (other_bigit + 1); + ASSERT(division_estimate < 0x10000); + result += static_cast(division_estimate); + SubtractTimes(other, division_estimate); + + if (other_bigit * (division_estimate + 1) > this_bigit) { + // No need to even try to subtract. Even if other's remaining digits were 0 + // another subtraction would be too much. 
+ return result; + } + + while (LessEqual(other, *this)) { + SubtractBignum(other); + result++; + } + return result; +} + + +template +static int SizeInHexChars(S number) { + ASSERT(number > 0); + int result = 0; + while (number != 0) { + number >>= 4; + result++; + } + return result; +} + + +static char HexCharOfValue(int value) { + ASSERT(0 <= value && value <= 16); + if (value < 10) return static_cast(value + '0'); + return static_cast(value - 10 + 'A'); +} + + +bool Bignum::ToHexString(char* buffer, int buffer_size) const { + ASSERT(IsClamped()); + // Each bigit must be printable as separate hex-character. + ASSERT(kBigitSize % 4 == 0); + const int kHexCharsPerBigit = kBigitSize / 4; + + if (used_digits_ == 0) { + if (buffer_size < 2) return false; + buffer[0] = '0'; + buffer[1] = '\0'; + return true; + } + // We add 1 for the terminating '\0' character. + int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit + + SizeInHexChars(bigits_[used_digits_ - 1]) + 1; + if (needed_chars > buffer_size) return false; + int string_index = needed_chars - 1; + buffer[string_index--] = '\0'; + for (int i = 0; i < exponent_; ++i) { + for (int j = 0; j < kHexCharsPerBigit; ++j) { + buffer[string_index--] = '0'; + } + } + for (int i = 0; i < used_digits_ - 1; ++i) { + Chunk current_bigit = bigits_[i]; + for (int j = 0; j < kHexCharsPerBigit; ++j) { + buffer[string_index--] = HexCharOfValue(current_bigit & 0xF); + current_bigit >>= 4; + } + } + // And finally the last bigit. 
+ Chunk most_significant_bigit = bigits_[used_digits_ - 1]; + while (most_significant_bigit != 0) { + buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF); + most_significant_bigit >>= 4; + } + return true; +} + + +Bignum::Chunk Bignum::BigitAt(int index) const { + if (index >= BigitLength()) return 0; + if (index < exponent_) return 0; + return bigits_[index - exponent_]; +} + + +int Bignum::Compare(const Bignum& a, const Bignum& b) { + ASSERT(a.IsClamped()); + ASSERT(b.IsClamped()); + int bigit_length_a = a.BigitLength(); + int bigit_length_b = b.BigitLength(); + if (bigit_length_a < bigit_length_b) return -1; + if (bigit_length_a > bigit_length_b) return +1; + for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) { + Chunk bigit_a = a.BigitAt(i); + Chunk bigit_b = b.BigitAt(i); + if (bigit_a < bigit_b) return -1; + if (bigit_a > bigit_b) return +1; + // Otherwise they are equal up to this digit. Try the next digit. + } + return 0; +} + + +int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) { + ASSERT(a.IsClamped()); + ASSERT(b.IsClamped()); + ASSERT(c.IsClamped()); + if (a.BigitLength() < b.BigitLength()) { + return PlusCompare(b, a, c); + } + if (a.BigitLength() + 1 < c.BigitLength()) return -1; + if (a.BigitLength() > c.BigitLength()) return +1; + // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than + // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one + // of 'a'. + if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) { + return -1; + } + + Chunk borrow = 0; + // Starting at min_exponent all digits are == 0. So no need to compare them. 
+ int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_); + for (int i = c.BigitLength() - 1; i >= min_exponent; --i) { + Chunk chunk_a = a.BigitAt(i); + Chunk chunk_b = b.BigitAt(i); + Chunk chunk_c = c.BigitAt(i); + Chunk sum = chunk_a + chunk_b; + if (sum > chunk_c + borrow) { + return +1; + } else { + borrow = chunk_c + borrow - sum; + if (borrow > 1) return -1; + borrow <<= kBigitSize; + } + } + if (borrow == 0) return 0; + return -1; +} + + +void Bignum::Clamp() { + while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) { + used_digits_--; + } + if (used_digits_ == 0) { + // Zero. + exponent_ = 0; + } +} + + +bool Bignum::IsClamped() const { + return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0; +} + + +void Bignum::Zero() { + for (int i = 0; i < used_digits_; ++i) { + bigits_[i] = 0; + } + used_digits_ = 0; + exponent_ = 0; +} + + +void Bignum::Align(const Bignum& other) { + if (exponent_ > other.exponent_) { + // If "X" represents a "hidden" digit (by the exponent) then we are in the + // following case (a == this, b == other): + // a: aaaaaaXXXX or a: aaaaaXXX + // b: bbbbbbX b: bbbbbbbbXX + // We replace some of the hidden digits (X) of a with 0 digits. 
+ // a: aaaaaa000X or a: aaaaa0XX + int zero_digits = exponent_ - other.exponent_; + EnsureCapacity(used_digits_ + zero_digits); + for (int i = used_digits_ - 1; i >= 0; --i) { + bigits_[i + zero_digits] = bigits_[i]; + } + for (int i = 0; i < zero_digits; ++i) { + bigits_[i] = 0; + } + used_digits_ += zero_digits; + exponent_ -= zero_digits; + ASSERT(used_digits_ >= 0); + ASSERT(exponent_ >= 0); + } +} + + +void Bignum::BigitsShiftLeft(int shift_amount) { + ASSERT(shift_amount < kBigitSize); + ASSERT(shift_amount >= 0); + Chunk carry = 0; + for (int i = 0; i < used_digits_; ++i) { + Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount); + bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask; + carry = new_carry; + } + if (carry != 0) { + bigits_[used_digits_] = carry; + used_digits_++; + } +} + + +void Bignum::SubtractTimes(const Bignum& other, int factor) { + ASSERT(exponent_ <= other.exponent_); + if (factor < 3) { + for (int i = 0; i < factor; ++i) { + SubtractBignum(other); + } + return; + } + Chunk borrow = 0; + int exponent_diff = other.exponent_ - exponent_; + for (int i = 0; i < other.used_digits_; ++i) { + DoubleChunk product = static_cast(factor) * other.bigits_[i]; + DoubleChunk remove = borrow + product; + Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask); + bigits_[i + exponent_diff] = difference & kBigitMask; + borrow = static_cast((difference >> (kChunkSize - 1)) + + (remove >> kBigitSize)); + } + for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) { + if (borrow == 0) return; + Chunk difference = bigits_[i] - borrow; + bigits_[i] = difference & kBigitMask; + borrow = difference >> (kChunkSize - 1); + } + Clamp(); +} + + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/bignum.h b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum.h new file mode 100644 index 000000000..5ec3544f5 --- /dev/null +++ 
b/native/iosTest/Pods/DoubleConversion/double-conversion/bignum.h @@ -0,0 +1,145 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_BIGNUM_H_ +#define DOUBLE_CONVERSION_BIGNUM_H_ + +#include "utils.h" + +namespace double_conversion { + +class Bignum { + public: + // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately. + // This bignum can encode much bigger numbers, since it contains an + // exponent. 
+ static const int kMaxSignificantBits = 3584; + + Bignum(); + void AssignUInt16(uint16_t value); + void AssignUInt64(uint64_t value); + void AssignBignum(const Bignum& other); + + void AssignDecimalString(Vector value); + void AssignHexString(Vector value); + + void AssignPowerUInt16(uint16_t base, int exponent); + + void AddUInt16(uint16_t operand); + void AddUInt64(uint64_t operand); + void AddBignum(const Bignum& other); + // Precondition: this >= other. + void SubtractBignum(const Bignum& other); + + void Square(); + void ShiftLeft(int shift_amount); + void MultiplyByUInt32(uint32_t factor); + void MultiplyByUInt64(uint64_t factor); + void MultiplyByPowerOfTen(int exponent); + void Times10() { return MultiplyByUInt32(10); } + // Pseudocode: + // int result = this / other; + // this = this % other; + // In the worst case this function is in O(this/other). + uint16_t DivideModuloIntBignum(const Bignum& other); + + bool ToHexString(char* buffer, int buffer_size) const; + + // Returns + // -1 if a < b, + // 0 if a == b, and + // +1 if a > b. 
+ static int Compare(const Bignum& a, const Bignum& b); + static bool Equal(const Bignum& a, const Bignum& b) { + return Compare(a, b) == 0; + } + static bool LessEqual(const Bignum& a, const Bignum& b) { + return Compare(a, b) <= 0; + } + static bool Less(const Bignum& a, const Bignum& b) { + return Compare(a, b) < 0; + } + // Returns Compare(a + b, c); + static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c); + // Returns a + b == c + static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) == 0; + } + // Returns a + b <= c + static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) <= 0; + } + // Returns a + b < c + static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) < 0; + } + private: + typedef uint32_t Chunk; + typedef uint64_t DoubleChunk; + + static const int kChunkSize = sizeof(Chunk) * 8; + static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8; + // With bigit size of 28 we loose some bits, but a double still fits easily + // into two chunks, and more importantly we can use the Comba multiplication. + static const int kBigitSize = 28; + static const Chunk kBigitMask = (1 << kBigitSize) - 1; + // Every instance allocates kBigitLength chunks on the stack. Bignums cannot + // grow. There are no checks if the stack-allocated space is sufficient. + static const int kBigitCapacity = kMaxSignificantBits / kBigitSize; + + void EnsureCapacity(int size) { + if (size > kBigitCapacity) { + UNREACHABLE(); + } + } + void Align(const Bignum& other); + void Clamp(); + bool IsClamped() const; + void Zero(); + // Requires this to have enough capacity (no tests done). + // Updates used_digits_ if necessary. + // shift_amount must be < kBigitSize. + void BigitsShiftLeft(int shift_amount); + // BigitLength includes the "hidden" digits encoded in the exponent. 
+ int BigitLength() const { return used_digits_ + exponent_; } + Chunk BigitAt(int index) const; + void SubtractTimes(const Bignum& other, int factor); + + Chunk bigits_buffer_[kBigitCapacity]; + // A vector backed by bigits_buffer_. This way accesses to the array are + // checked for out-of-bounds errors. + Vector bigits_; + int used_digits_; + // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize). + int exponent_; + + DISALLOW_COPY_AND_ASSIGN(Bignum); +}; + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_BIGNUM_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/cached-powers.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/cached-powers.cc new file mode 100644 index 000000000..d1359ffe4 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/cached-powers.cc @@ -0,0 +1,176 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include +#include +#include + +#include "utils.h" + +#include "cached-powers.h" + +namespace double_conversion { + +struct CachedPower { + uint64_t significand; + int16_t binary_exponent; + int16_t decimal_exponent; +}; + +static const CachedPower kCachedPowers[] = { + {UINT64_2PART_C(0xfa8fd5a0, 081c0288), -1220, -348}, + {UINT64_2PART_C(0xbaaee17f, a23ebf76), -1193, -340}, + {UINT64_2PART_C(0x8b16fb20, 3055ac76), -1166, -332}, + {UINT64_2PART_C(0xcf42894a, 5dce35ea), -1140, -324}, + {UINT64_2PART_C(0x9a6bb0aa, 55653b2d), -1113, -316}, + {UINT64_2PART_C(0xe61acf03, 3d1a45df), -1087, -308}, + {UINT64_2PART_C(0xab70fe17, c79ac6ca), -1060, -300}, + {UINT64_2PART_C(0xff77b1fc, bebcdc4f), -1034, -292}, + {UINT64_2PART_C(0xbe5691ef, 416bd60c), -1007, -284}, + {UINT64_2PART_C(0x8dd01fad, 907ffc3c), -980, -276}, + {UINT64_2PART_C(0xd3515c28, 31559a83), -954, -268}, + {UINT64_2PART_C(0x9d71ac8f, ada6c9b5), -927, -260}, + {UINT64_2PART_C(0xea9c2277, 23ee8bcb), -901, -252}, + {UINT64_2PART_C(0xaecc4991, 4078536d), -874, -244}, + {UINT64_2PART_C(0x823c1279, 5db6ce57), -847, -236}, + {UINT64_2PART_C(0xc2109436, 4dfb5637), -821, -228}, + {UINT64_2PART_C(0x9096ea6f, 3848984f), -794, -220}, + {UINT64_2PART_C(0xd77485cb, 25823ac7), -768, -212}, + {UINT64_2PART_C(0xa086cfcd, 97bf97f4), -741, -204}, + {UINT64_2PART_C(0xef340a98, 172aace5), -715, -196}, + {UINT64_2PART_C(0xb23867fb, 2a35b28e), -688, -188}, + {UINT64_2PART_C(0x84c8d4df, d2c63f3b), 
-661, -180}, + {UINT64_2PART_C(0xc5dd4427, 1ad3cdba), -635, -172}, + {UINT64_2PART_C(0x936b9fce, bb25c996), -608, -164}, + {UINT64_2PART_C(0xdbac6c24, 7d62a584), -582, -156}, + {UINT64_2PART_C(0xa3ab6658, 0d5fdaf6), -555, -148}, + {UINT64_2PART_C(0xf3e2f893, dec3f126), -529, -140}, + {UINT64_2PART_C(0xb5b5ada8, aaff80b8), -502, -132}, + {UINT64_2PART_C(0x87625f05, 6c7c4a8b), -475, -124}, + {UINT64_2PART_C(0xc9bcff60, 34c13053), -449, -116}, + {UINT64_2PART_C(0x964e858c, 91ba2655), -422, -108}, + {UINT64_2PART_C(0xdff97724, 70297ebd), -396, -100}, + {UINT64_2PART_C(0xa6dfbd9f, b8e5b88f), -369, -92}, + {UINT64_2PART_C(0xf8a95fcf, 88747d94), -343, -84}, + {UINT64_2PART_C(0xb9447093, 8fa89bcf), -316, -76}, + {UINT64_2PART_C(0x8a08f0f8, bf0f156b), -289, -68}, + {UINT64_2PART_C(0xcdb02555, 653131b6), -263, -60}, + {UINT64_2PART_C(0x993fe2c6, d07b7fac), -236, -52}, + {UINT64_2PART_C(0xe45c10c4, 2a2b3b06), -210, -44}, + {UINT64_2PART_C(0xaa242499, 697392d3), -183, -36}, + {UINT64_2PART_C(0xfd87b5f2, 8300ca0e), -157, -28}, + {UINT64_2PART_C(0xbce50864, 92111aeb), -130, -20}, + {UINT64_2PART_C(0x8cbccc09, 6f5088cc), -103, -12}, + {UINT64_2PART_C(0xd1b71758, e219652c), -77, -4}, + {UINT64_2PART_C(0x9c400000, 00000000), -50, 4}, + {UINT64_2PART_C(0xe8d4a510, 00000000), -24, 12}, + {UINT64_2PART_C(0xad78ebc5, ac620000), 3, 20}, + {UINT64_2PART_C(0x813f3978, f8940984), 30, 28}, + {UINT64_2PART_C(0xc097ce7b, c90715b3), 56, 36}, + {UINT64_2PART_C(0x8f7e32ce, 7bea5c70), 83, 44}, + {UINT64_2PART_C(0xd5d238a4, abe98068), 109, 52}, + {UINT64_2PART_C(0x9f4f2726, 179a2245), 136, 60}, + {UINT64_2PART_C(0xed63a231, d4c4fb27), 162, 68}, + {UINT64_2PART_C(0xb0de6538, 8cc8ada8), 189, 76}, + {UINT64_2PART_C(0x83c7088e, 1aab65db), 216, 84}, + {UINT64_2PART_C(0xc45d1df9, 42711d9a), 242, 92}, + {UINT64_2PART_C(0x924d692c, a61be758), 269, 100}, + {UINT64_2PART_C(0xda01ee64, 1a708dea), 295, 108}, + {UINT64_2PART_C(0xa26da399, 9aef774a), 322, 116}, + {UINT64_2PART_C(0xf209787b, b47d6b85), 348, 
124}, + {UINT64_2PART_C(0xb454e4a1, 79dd1877), 375, 132}, + {UINT64_2PART_C(0x865b8692, 5b9bc5c2), 402, 140}, + {UINT64_2PART_C(0xc83553c5, c8965d3d), 428, 148}, + {UINT64_2PART_C(0x952ab45c, fa97a0b3), 455, 156}, + {UINT64_2PART_C(0xde469fbd, 99a05fe3), 481, 164}, + {UINT64_2PART_C(0xa59bc234, db398c25), 508, 172}, + {UINT64_2PART_C(0xf6c69a72, a3989f5c), 534, 180}, + {UINT64_2PART_C(0xb7dcbf53, 54e9bece), 561, 188}, + {UINT64_2PART_C(0x88fcf317, f22241e2), 588, 196}, + {UINT64_2PART_C(0xcc20ce9b, d35c78a5), 614, 204}, + {UINT64_2PART_C(0x98165af3, 7b2153df), 641, 212}, + {UINT64_2PART_C(0xe2a0b5dc, 971f303a), 667, 220}, + {UINT64_2PART_C(0xa8d9d153, 5ce3b396), 694, 228}, + {UINT64_2PART_C(0xfb9b7cd9, a4a7443c), 720, 236}, + {UINT64_2PART_C(0xbb764c4c, a7a44410), 747, 244}, + {UINT64_2PART_C(0x8bab8eef, b6409c1a), 774, 252}, + {UINT64_2PART_C(0xd01fef10, a657842c), 800, 260}, + {UINT64_2PART_C(0x9b10a4e5, e9913129), 827, 268}, + {UINT64_2PART_C(0xe7109bfb, a19c0c9d), 853, 276}, + {UINT64_2PART_C(0xac2820d9, 623bf429), 880, 284}, + {UINT64_2PART_C(0x80444b5e, 7aa7cf85), 907, 292}, + {UINT64_2PART_C(0xbf21e440, 03acdd2d), 933, 300}, + {UINT64_2PART_C(0x8e679c2f, 5e44ff8f), 960, 308}, + {UINT64_2PART_C(0xd433179d, 9c8cb841), 986, 316}, + {UINT64_2PART_C(0x9e19db92, b4e31ba9), 1013, 324}, + {UINT64_2PART_C(0xeb96bf6e, badf77d9), 1039, 332}, + {UINT64_2PART_C(0xaf87023b, 9bf0ee6b), 1066, 340}, +}; + +static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers); +static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent. +static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10) +// Difference between the decimal exponents in the table above. 
+const int PowersOfTenCache::kDecimalExponentDistance = 8; +const int PowersOfTenCache::kMinDecimalExponent = -348; +const int PowersOfTenCache::kMaxDecimalExponent = 340; + +void PowersOfTenCache::GetCachedPowerForBinaryExponentRange( + int min_exponent, + int max_exponent, + DiyFp* power, + int* decimal_exponent) { + int kQ = DiyFp::kSignificandSize; + double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10); + int foo = kCachedPowersOffset; + int index = + (foo + static_cast(k) - 1) / kDecimalExponentDistance + 1; + ASSERT(0 <= index && index < kCachedPowersLength); + CachedPower cached_power = kCachedPowers[index]; + ASSERT(min_exponent <= cached_power.binary_exponent); + (void) max_exponent; // Mark variable as used. + ASSERT(cached_power.binary_exponent <= max_exponent); + *decimal_exponent = cached_power.decimal_exponent; + *power = DiyFp(cached_power.significand, cached_power.binary_exponent); +} + + +void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent, + DiyFp* power, + int* found_exponent) { + ASSERT(kMinDecimalExponent <= requested_exponent); + ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance); + int index = + (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance; + CachedPower cached_power = kCachedPowers[index]; + *power = DiyFp(cached_power.significand, cached_power.binary_exponent); + *found_exponent = cached_power.decimal_exponent; + ASSERT(*found_exponent <= requested_exponent); + ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance); +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/cached-powers.h b/native/iosTest/Pods/DoubleConversion/double-conversion/cached-powers.h new file mode 100644 index 000000000..61a50614c --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/cached-powers.h @@ -0,0 +1,64 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_ +#define DOUBLE_CONVERSION_CACHED_POWERS_H_ + +#include "diy-fp.h" + +namespace double_conversion { + +class PowersOfTenCache { + public: + + // Not all powers of ten are cached. The decimal exponent of two neighboring + // cached numbers will differ by kDecimalExponentDistance. 
+ static const int kDecimalExponentDistance; + + static const int kMinDecimalExponent; + static const int kMaxDecimalExponent; + + // Returns a cached power-of-ten with a binary exponent in the range + // [min_exponent; max_exponent] (boundaries included). + static void GetCachedPowerForBinaryExponentRange(int min_exponent, + int max_exponent, + DiyFp* power, + int* decimal_exponent); + + // Returns a cached power of ten x ~= 10^k such that + // k <= decimal_exponent < k + kCachedPowersDecimalDistance. + // The given decimal_exponent must satisfy + // kMinDecimalExponent <= requested_exponent, and + // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance. + static void GetCachedPowerForDecimalExponent(int requested_exponent, + DiyFp* power, + int* found_exponent); +}; + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/diy-fp.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/diy-fp.cc new file mode 100644 index 000000000..ddd1891b1 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/diy-fp.cc @@ -0,0 +1,57 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#include "diy-fp.h" +#include "utils.h" + +namespace double_conversion { + +void DiyFp::Multiply(const DiyFp& other) { + // Simply "emulates" a 128 bit multiplication. + // However: the resulting number only contains 64 bits. The least + // significant 64 bits are only used for rounding the most significant 64 + // bits. + const uint64_t kM32 = 0xFFFFFFFFU; + uint64_t a = f_ >> 32; + uint64_t b = f_ & kM32; + uint64_t c = other.f_ >> 32; + uint64_t d = other.f_ & kM32; + uint64_t ac = a * c; + uint64_t bc = b * c; + uint64_t ad = a * d; + uint64_t bd = b * d; + uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32); + // By adding 1U << 31 to tmp we round the final result. + // Halfway cases will be round up. 
+ tmp += 1U << 31; + uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32); + e_ += other.e_ + 64; + f_ = result_f; +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/diy-fp.h b/native/iosTest/Pods/DoubleConversion/double-conversion/diy-fp.h new file mode 100644 index 000000000..9dcf8fbdb --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/diy-fp.h @@ -0,0 +1,118 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DIY_FP_H_ +#define DOUBLE_CONVERSION_DIY_FP_H_ + +#include "utils.h" + +namespace double_conversion { + +// This "Do It Yourself Floating Point" class implements a floating-point number +// with a uint64 significand and an int exponent. Normalized DiyFp numbers will +// have the most significant bit of the significand set. +// Multiplication and Subtraction do not normalize their results. +// DiyFp are not designed to contain special doubles (NaN and Infinity). +class DiyFp { + public: + static const int kSignificandSize = 64; + + DiyFp() : f_(0), e_(0) {} + DiyFp(uint64_t f, int e) : f_(f), e_(e) {} + + // this = this - other. + // The exponents of both numbers must be the same and the significand of this + // must be bigger than the significand of other. + // The result will not be normalized. + void Subtract(const DiyFp& other) { + ASSERT(e_ == other.e_); + ASSERT(f_ >= other.f_); + f_ -= other.f_; + } + + // Returns a - b. + // The exponents of both numbers must be the same and this must be bigger + // than other. The result will not be normalized. + static DiyFp Minus(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Subtract(b); + return result; + } + + + // this = this * other. 
+ void Multiply(const DiyFp& other); + + // returns a * b; + static DiyFp Times(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Multiply(b); + return result; + } + + void Normalize() { + ASSERT(f_ != 0); + uint64_t f = f_; + int e = e_; + + // This method is mainly called for normalizing boundaries. In general + // boundaries need to be shifted by 10 bits. We thus optimize for this case. + const uint64_t k10MSBits = UINT64_2PART_C(0xFFC00000, 00000000); + while ((f & k10MSBits) == 0) { + f <<= 10; + e -= 10; + } + while ((f & kUint64MSB) == 0) { + f <<= 1; + e--; + } + f_ = f; + e_ = e; + } + + static DiyFp Normalize(const DiyFp& a) { + DiyFp result = a; + result.Normalize(); + return result; + } + + uint64_t f() const { return f_; } + int e() const { return e_; } + + void set_f(uint64_t new_value) { f_ = new_value; } + void set_e(int new_value) { e_ = new_value; } + + private: + static const uint64_t kUint64MSB = UINT64_2PART_C(0x80000000, 00000000); + + uint64_t f_; + int e_; +}; + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_DIY_FP_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/double-conversion.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/double-conversion.cc new file mode 100644 index 000000000..db3feecb2 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/double-conversion.cc @@ -0,0 +1,910 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include +#include + +#include "double-conversion.h" + +#include "bignum-dtoa.h" +#include "fast-dtoa.h" +#include "fixed-dtoa.h" +#include "ieee.h" +#include "strtod.h" +#include "utils.h" + +namespace double_conversion { + +const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() { + int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN; + static DoubleToStringConverter converter(flags, + "Infinity", + "NaN", + 'e', + -6, 21, + 6, 0); + return converter; +} + + +bool DoubleToStringConverter::HandleSpecialValues( + double value, + StringBuilder* result_builder) const { + Double double_inspect(value); + if (double_inspect.IsInfinite()) { + if (infinity_symbol_ == NULL) return false; + if (value < 0) { + result_builder->AddCharacter('-'); + } + result_builder->AddString(infinity_symbol_); + return true; + } + if (double_inspect.IsNan()) { + if (nan_symbol_ == NULL) return false; + result_builder->AddString(nan_symbol_); + return true; + } + return false; +} + + 
+void DoubleToStringConverter::CreateExponentialRepresentation( + const char* decimal_digits, + int length, + int exponent, + StringBuilder* result_builder) const { + ASSERT(length != 0); + result_builder->AddCharacter(decimal_digits[0]); + if (length != 1) { + result_builder->AddCharacter('.'); + result_builder->AddSubstring(&decimal_digits[1], length-1); + } + result_builder->AddCharacter(exponent_character_); + if (exponent < 0) { + result_builder->AddCharacter('-'); + exponent = -exponent; + } else { + if ((flags_ & EMIT_POSITIVE_EXPONENT_SIGN) != 0) { + result_builder->AddCharacter('+'); + } + } + if (exponent == 0) { + result_builder->AddCharacter('0'); + return; + } + ASSERT(exponent < 1e4); + const int kMaxExponentLength = 5; + char buffer[kMaxExponentLength + 1]; + buffer[kMaxExponentLength] = '\0'; + int first_char_pos = kMaxExponentLength; + while (exponent > 0) { + buffer[--first_char_pos] = '0' + (exponent % 10); + exponent /= 10; + } + result_builder->AddSubstring(&buffer[first_char_pos], + kMaxExponentLength - first_char_pos); +} + + +void DoubleToStringConverter::CreateDecimalRepresentation( + const char* decimal_digits, + int length, + int decimal_point, + int digits_after_point, + StringBuilder* result_builder) const { + // Create a representation that is padded with zeros if needed. + if (decimal_point <= 0) { + // "0.00000decimal_rep". 
+ result_builder->AddCharacter('0'); + if (digits_after_point > 0) { + result_builder->AddCharacter('.'); + result_builder->AddPadding('0', -decimal_point); + ASSERT(length <= digits_after_point - (-decimal_point)); + result_builder->AddSubstring(decimal_digits, length); + int remaining_digits = digits_after_point - (-decimal_point) - length; + result_builder->AddPadding('0', remaining_digits); + } + } else if (decimal_point >= length) { + // "decimal_rep0000.00000" or "decimal_rep.0000" + result_builder->AddSubstring(decimal_digits, length); + result_builder->AddPadding('0', decimal_point - length); + if (digits_after_point > 0) { + result_builder->AddCharacter('.'); + result_builder->AddPadding('0', digits_after_point); + } + } else { + // "decima.l_rep000" + ASSERT(digits_after_point > 0); + result_builder->AddSubstring(decimal_digits, decimal_point); + result_builder->AddCharacter('.'); + ASSERT(length - decimal_point <= digits_after_point); + result_builder->AddSubstring(&decimal_digits[decimal_point], + length - decimal_point); + int remaining_digits = digits_after_point - (length - decimal_point); + result_builder->AddPadding('0', remaining_digits); + } + if (digits_after_point == 0) { + if ((flags_ & EMIT_TRAILING_DECIMAL_POINT) != 0) { + result_builder->AddCharacter('.'); + } + if ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) { + result_builder->AddCharacter('0'); + } + } +} + + +bool DoubleToStringConverter::ToShortestIeeeNumber( + double value, + StringBuilder* result_builder, + DoubleToStringConverter::DtoaMode mode) const { + ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE); + if (Double(value).IsSpecial()) { + return HandleSpecialValues(value, result_builder); + } + + int decimal_point; + bool sign; + const int kDecimalRepCapacity = kBase10MaximalLength + 1; + char decimal_rep[kDecimalRepCapacity]; + int decimal_rep_length; + + DoubleToAscii(value, mode, 0, decimal_rep, kDecimalRepCapacity, + &sign, &decimal_rep_length, &decimal_point); + + bool 
unique_zero = (flags_ & UNIQUE_ZERO) != 0; + if (sign && (value != 0.0 || !unique_zero)) { + result_builder->AddCharacter('-'); + } + + int exponent = decimal_point - 1; + if ((decimal_in_shortest_low_ <= exponent) && + (exponent < decimal_in_shortest_high_)) { + CreateDecimalRepresentation(decimal_rep, decimal_rep_length, + decimal_point, + Max(0, decimal_rep_length - decimal_point), + result_builder); + } else { + CreateExponentialRepresentation(decimal_rep, decimal_rep_length, exponent, + result_builder); + } + return true; +} + + +bool DoubleToStringConverter::ToFixed(double value, + int requested_digits, + StringBuilder* result_builder) const { + ASSERT(kMaxFixedDigitsBeforePoint == 60); + const double kFirstNonFixed = 1e60; + + if (Double(value).IsSpecial()) { + return HandleSpecialValues(value, result_builder); + } + + if (requested_digits > kMaxFixedDigitsAfterPoint) return false; + if (value >= kFirstNonFixed || value <= -kFirstNonFixed) return false; + + // Find a sufficiently precise decimal representation of n. + int decimal_point; + bool sign; + // Add space for the '\0' byte. 
+ const int kDecimalRepCapacity = + kMaxFixedDigitsBeforePoint + kMaxFixedDigitsAfterPoint + 1; + char decimal_rep[kDecimalRepCapacity]; + int decimal_rep_length; + DoubleToAscii(value, FIXED, requested_digits, + decimal_rep, kDecimalRepCapacity, + &sign, &decimal_rep_length, &decimal_point); + + bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0); + if (sign && (value != 0.0 || !unique_zero)) { + result_builder->AddCharacter('-'); + } + + CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point, + requested_digits, result_builder); + return true; +} + + +bool DoubleToStringConverter::ToExponential( + double value, + int requested_digits, + StringBuilder* result_builder) const { + if (Double(value).IsSpecial()) { + return HandleSpecialValues(value, result_builder); + } + + if (requested_digits < -1) return false; + if (requested_digits > kMaxExponentialDigits) return false; + + int decimal_point; + bool sign; + // Add space for digit before the decimal point and the '\0' character. 
+ const int kDecimalRepCapacity = kMaxExponentialDigits + 2; + ASSERT(kDecimalRepCapacity > kBase10MaximalLength); + char decimal_rep[kDecimalRepCapacity]; + int decimal_rep_length; + + if (requested_digits == -1) { + DoubleToAscii(value, SHORTEST, 0, + decimal_rep, kDecimalRepCapacity, + &sign, &decimal_rep_length, &decimal_point); + } else { + DoubleToAscii(value, PRECISION, requested_digits + 1, + decimal_rep, kDecimalRepCapacity, + &sign, &decimal_rep_length, &decimal_point); + ASSERT(decimal_rep_length <= requested_digits + 1); + + for (int i = decimal_rep_length; i < requested_digits + 1; ++i) { + decimal_rep[i] = '0'; + } + decimal_rep_length = requested_digits + 1; + } + + bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0); + if (sign && (value != 0.0 || !unique_zero)) { + result_builder->AddCharacter('-'); + } + + int exponent = decimal_point - 1; + CreateExponentialRepresentation(decimal_rep, + decimal_rep_length, + exponent, + result_builder); + return true; +} + + +bool DoubleToStringConverter::ToPrecision(double value, + int precision, + StringBuilder* result_builder) const { + if (Double(value).IsSpecial()) { + return HandleSpecialValues(value, result_builder); + } + + if (precision < kMinPrecisionDigits || precision > kMaxPrecisionDigits) { + return false; + } + + // Find a sufficiently precise decimal representation of n. + int decimal_point; + bool sign; + // Add one for the terminating null character. + const int kDecimalRepCapacity = kMaxPrecisionDigits + 1; + char decimal_rep[kDecimalRepCapacity]; + int decimal_rep_length; + + DoubleToAscii(value, PRECISION, precision, + decimal_rep, kDecimalRepCapacity, + &sign, &decimal_rep_length, &decimal_point); + ASSERT(decimal_rep_length <= precision); + + bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0); + if (sign && (value != 0.0 || !unique_zero)) { + result_builder->AddCharacter('-'); + } + + // The exponent if we print the number as x.xxeyyy. That is with the + // decimal point after the first digit. 
+ int exponent = decimal_point - 1; + + int extra_zero = ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) ? 1 : 0; + if ((-decimal_point + 1 > max_leading_padding_zeroes_in_precision_mode_) || + (decimal_point - precision + extra_zero > + max_trailing_padding_zeroes_in_precision_mode_)) { + // Fill buffer to contain 'precision' digits. + // Usually the buffer is already at the correct length, but 'DoubleToAscii' + // is allowed to return less characters. + for (int i = decimal_rep_length; i < precision; ++i) { + decimal_rep[i] = '0'; + } + + CreateExponentialRepresentation(decimal_rep, + precision, + exponent, + result_builder); + } else { + CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point, + Max(0, precision - decimal_point), + result_builder); + } + return true; +} + + +static BignumDtoaMode DtoaToBignumDtoaMode( + DoubleToStringConverter::DtoaMode dtoa_mode) { + switch (dtoa_mode) { + case DoubleToStringConverter::SHORTEST: return BIGNUM_DTOA_SHORTEST; + case DoubleToStringConverter::SHORTEST_SINGLE: + return BIGNUM_DTOA_SHORTEST_SINGLE; + case DoubleToStringConverter::FIXED: return BIGNUM_DTOA_FIXED; + case DoubleToStringConverter::PRECISION: return BIGNUM_DTOA_PRECISION; + default: + UNREACHABLE(); + } +} + + +void DoubleToStringConverter::DoubleToAscii(double v, + DtoaMode mode, + int requested_digits, + char* buffer, + int buffer_length, + bool* sign, + int* length, + int* point) { + Vector vector(buffer, buffer_length); + ASSERT(!Double(v).IsSpecial()); + ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE || requested_digits >= 0); + + if (Double(v).Sign() < 0) { + *sign = true; + v = -v; + } else { + *sign = false; + } + + if (mode == PRECISION && requested_digits == 0) { + vector[0] = '\0'; + *length = 0; + return; + } + + if (v == 0) { + vector[0] = '0'; + vector[1] = '\0'; + *length = 1; + *point = 1; + return; + } + + bool fast_worked; + switch (mode) { + case SHORTEST: + fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, 
vector, length, point); + break; + case SHORTEST_SINGLE: + fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST_SINGLE, 0, + vector, length, point); + break; + case FIXED: + fast_worked = FastFixedDtoa(v, requested_digits, vector, length, point); + break; + case PRECISION: + fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits, + vector, length, point); + break; + default: + fast_worked = false; + UNREACHABLE(); + } + if (fast_worked) return; + + // If the fast dtoa didn't succeed use the slower bignum version. + BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode); + BignumDtoa(v, bignum_mode, requested_digits, vector, length, point); + vector[*length] = '\0'; +} + + +// Consumes the given substring from the iterator. +// Returns false, if the substring does not match. +static bool ConsumeSubString(const char** current, + const char* end, + const char* substring) { + ASSERT(**current == *substring); + for (substring++; *substring != '\0'; substring++) { + ++*current; + if (*current == end || **current != *substring) return false; + } + ++*current; + return true; +} + + +// Maximum number of significant digits in decimal representation. +// The longest possible double in decimal representation is +// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074 +// (768 digits). If we parse a number whose first digits are equal to a +// mean of 2 adjacent doubles (that could have up to 769 digits) the result +// must be rounded to the bigger one unless the tail consists of zeros, so +// we don't need to preserve all the digits. +const int kMaxSignificantDigits = 772; + + +// Returns true if a nonspace found and false if the end has reached. 
+static inline bool AdvanceToNonspace(const char** current, const char* end) { + while (*current != end) { + if (**current != ' ') return true; + ++*current; + } + return false; +} + + +static bool isDigit(int x, int radix) { + return (x >= '0' && x <= '9' && x < '0' + radix) + || (radix > 10 && x >= 'a' && x < 'a' + radix - 10) + || (radix > 10 && x >= 'A' && x < 'A' + radix - 10); +} + + +static double SignedZero(bool sign) { + return sign ? -0.0 : 0.0; +} + + +// Returns true if 'c' is a decimal digit that is valid for the given radix. +// +// The function is small and could be inlined, but VS2012 emitted a warning +// because it constant-propagated the radix and concluded that the last +// condition was always true. By moving it into a separate function the +// compiler wouldn't warn anymore. +static bool IsDecimalDigitForRadix(int c, int radix) { + return '0' <= c && c <= '9' && (c - '0') < radix; +} + +// Returns true if 'c' is a character digit that is valid for the given radix. +// The 'a_character' should be 'a' or 'A'. +// +// The function is small and could be inlined, but VS2012 emitted a warning +// because it constant-propagated the radix and concluded that the first +// condition was always false. By moving it into a separate function the +// compiler wouldn't warn anymore. +static bool IsCharacterDigitForRadix(int c, int radix, char a_character) { + return radix > 10 && c >= a_character && c < a_character + radix - 10; +} + + +// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end. +template +static double RadixStringToIeee(const char* current, + const char* end, + bool sign, + bool allow_trailing_junk, + double junk_string_value, + bool read_as_double, + const char** trailing_pointer) { + ASSERT(current != end); + + const int kDoubleSize = Double::kSignificandSize; + const int kSingleSize = Single::kSignificandSize; + const int kSignificandSize = read_as_double? kDoubleSize: kSingleSize; + + // Skip leading 0s. 
+ while (*current == '0') { + ++current; + if (current == end) { + *trailing_pointer = end; + return SignedZero(sign); + } + } + + int64_t number = 0; + int exponent = 0; + const int radix = (1 << radix_log_2); + + do { + int digit; + if (IsDecimalDigitForRadix(*current, radix)) { + digit = static_cast(*current) - '0'; + } else if (IsCharacterDigitForRadix(*current, radix, 'a')) { + digit = static_cast(*current) - 'a' + 10; + } else if (IsCharacterDigitForRadix(*current, radix, 'A')) { + digit = static_cast(*current) - 'A' + 10; + } else { + if (allow_trailing_junk || !AdvanceToNonspace(¤t, end)) { + break; + } else { + return junk_string_value; + } + } + + number = number * radix + digit; + int overflow = static_cast(number >> kSignificandSize); + if (overflow != 0) { + // Overflow occurred. Need to determine which direction to round the + // result. + int overflow_bits_count = 1; + while (overflow > 1) { + overflow_bits_count++; + overflow >>= 1; + } + + int dropped_bits_mask = ((1 << overflow_bits_count) - 1); + int dropped_bits = static_cast(number) & dropped_bits_mask; + number >>= overflow_bits_count; + exponent = overflow_bits_count; + + bool zero_tail = true; + for (;;) { + ++current; + if (current == end || !isDigit(*current, radix)) break; + zero_tail = zero_tail && *current == '0'; + exponent += radix_log_2; + } + + if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) { + return junk_string_value; + } + + int middle_value = (1 << (overflow_bits_count - 1)); + if (dropped_bits > middle_value) { + number++; // Rounding up. + } else if (dropped_bits == middle_value) { + // Rounding to even to consistency with decimals: half-way case rounds + // up if significant part is odd and down otherwise. + if ((number & 1) != 0 || !zero_tail) { + number++; // Rounding up. + } + } + + // Rounding up may cause overflow. 
+ if ((number & ((int64_t)1 << kSignificandSize)) != 0) { + exponent++; + number >>= 1; + } + break; + } + ++current; + } while (current != end); + + ASSERT(number < ((int64_t)1 << kSignificandSize)); + ASSERT(static_cast(static_cast(number)) == number); + + *trailing_pointer = current; + + if (exponent == 0) { + if (sign) { + if (number == 0) return -0.0; + number = -number; + } + return static_cast(number); + } + + ASSERT(number != 0); + return Double(DiyFp(number, exponent)).value(); +} + + +double StringToDoubleConverter::StringToIeee( + const char* input, + int length, + int* processed_characters_count, + bool read_as_double) const { + const char* current = input; + const char* end = input + length; + + *processed_characters_count = 0; + + const bool allow_trailing_junk = (flags_ & ALLOW_TRAILING_JUNK) != 0; + const bool allow_leading_spaces = (flags_ & ALLOW_LEADING_SPACES) != 0; + const bool allow_trailing_spaces = (flags_ & ALLOW_TRAILING_SPACES) != 0; + const bool allow_spaces_after_sign = (flags_ & ALLOW_SPACES_AFTER_SIGN) != 0; + + // To make sure that iterator dereferencing is valid the following + // convention is used: + // 1. Each '++current' statement is followed by check for equality to 'end'. + // 2. If AdvanceToNonspace returned false then current == end. + // 3. If 'current' becomes equal to 'end' the function returns or goes to + // 'parsing_done'. + // 4. 'current' is not dereferenced after the 'parsing_done' label. + // 5. Code before 'parsing_done' may rely on 'current != end'. + if (current == end) return empty_string_value_; + + if (allow_leading_spaces || allow_trailing_spaces) { + if (!AdvanceToNonspace(¤t, end)) { + *processed_characters_count = static_cast(current - input); + return empty_string_value_; + } + if (!allow_leading_spaces && (input != current)) { + // No leading spaces allowed, but AdvanceToNonspace moved forward. + return junk_string_value_; + } + } + + // The longest form of simplified number is: "-.1eXXX\0". 
+ const int kBufferSize = kMaxSignificantDigits + 10; + char buffer[kBufferSize]; // NOLINT: size is known at compile time. + int buffer_pos = 0; + + // Exponent will be adjusted if insignificant digits of the integer part + // or insignificant leading zeros of the fractional part are dropped. + int exponent = 0; + int significant_digits = 0; + int insignificant_digits = 0; + bool nonzero_digit_dropped = false; + + bool sign = false; + + if (*current == '+' || *current == '-') { + sign = (*current == '-'); + ++current; + const char* next_non_space = current; + // Skip following spaces (if allowed). + if (!AdvanceToNonspace(&next_non_space, end)) return junk_string_value_; + if (!allow_spaces_after_sign && (current != next_non_space)) { + return junk_string_value_; + } + current = next_non_space; + } + + if (infinity_symbol_ != NULL) { + if (*current == infinity_symbol_[0]) { + if (!ConsumeSubString(¤t, end, infinity_symbol_)) { + return junk_string_value_; + } + + if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) { + return junk_string_value_; + } + if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) { + return junk_string_value_; + } + + ASSERT(buffer_pos == 0); + *processed_characters_count = static_cast(current - input); + return sign ? -Double::Infinity() : Double::Infinity(); + } + } + + if (nan_symbol_ != NULL) { + if (*current == nan_symbol_[0]) { + if (!ConsumeSubString(¤t, end, nan_symbol_)) { + return junk_string_value_; + } + + if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) { + return junk_string_value_; + } + if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) { + return junk_string_value_; + } + + ASSERT(buffer_pos == 0); + *processed_characters_count = static_cast(current - input); + return sign ? 
-Double::NaN() : Double::NaN(); + } + } + + bool leading_zero = false; + if (*current == '0') { + ++current; + if (current == end) { + *processed_characters_count = static_cast(current - input); + return SignedZero(sign); + } + + leading_zero = true; + + // It could be hexadecimal value. + if ((flags_ & ALLOW_HEX) && (*current == 'x' || *current == 'X')) { + ++current; + if (current == end || !isDigit(*current, 16)) { + return junk_string_value_; // "0x". + } + + const char* tail_pointer = NULL; + double result = RadixStringToIeee<4>(current, + end, + sign, + allow_trailing_junk, + junk_string_value_, + read_as_double, + &tail_pointer); + if (tail_pointer != NULL) { + if (allow_trailing_spaces) AdvanceToNonspace(&tail_pointer, end); + *processed_characters_count = static_cast(tail_pointer - input); + } + return result; + } + + // Ignore leading zeros in the integer part. + while (*current == '0') { + ++current; + if (current == end) { + *processed_characters_count = static_cast(current - input); + return SignedZero(sign); + } + } + } + + bool octal = leading_zero && (flags_ & ALLOW_OCTALS) != 0; + + // Copy significant digits of the integer part (if any) to the buffer. + while (*current >= '0' && *current <= '9') { + if (significant_digits < kMaxSignificantDigits) { + ASSERT(buffer_pos < kBufferSize); + buffer[buffer_pos++] = static_cast(*current); + significant_digits++; + // Will later check if it's an octal in the buffer. + } else { + insignificant_digits++; // Move the digit into the exponential part. 
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0'; + } + octal = octal && *current < '8'; + ++current; + if (current == end) goto parsing_done; + } + + if (significant_digits == 0) { + octal = false; + } + + if (*current == '.') { + if (octal && !allow_trailing_junk) return junk_string_value_; + if (octal) goto parsing_done; + + ++current; + if (current == end) { + if (significant_digits == 0 && !leading_zero) { + return junk_string_value_; + } else { + goto parsing_done; + } + } + + if (significant_digits == 0) { + // octal = false; + // Integer part consists of 0 or is absent. Significant digits start after + // leading zeros (if any). + while (*current == '0') { + ++current; + if (current == end) { + *processed_characters_count = static_cast(current - input); + return SignedZero(sign); + } + exponent--; // Move this 0 into the exponent. + } + } + + // There is a fractional part. + // We don't emit a '.', but adjust the exponent instead. + while (*current >= '0' && *current <= '9') { + if (significant_digits < kMaxSignificantDigits) { + ASSERT(buffer_pos < kBufferSize); + buffer[buffer_pos++] = static_cast(*current); + significant_digits++; + exponent--; + } else { + // Ignore insignificant digits in the fractional part. + nonzero_digit_dropped = nonzero_digit_dropped || *current != '0'; + } + ++current; + if (current == end) goto parsing_done; + } + } + + if (!leading_zero && exponent == 0 && significant_digits == 0) { + // If leading_zeros is true then the string contains zeros. + // If exponent < 0 then string was [+-]\.0*... + // If significant_digits != 0 the string is not equal to 0. + // Otherwise there are no digits in the string. + return junk_string_value_; + } + + // Parse exponential part. 
+ if (*current == 'e' || *current == 'E') { + if (octal && !allow_trailing_junk) return junk_string_value_; + if (octal) goto parsing_done; + ++current; + if (current == end) { + if (allow_trailing_junk) { + goto parsing_done; + } else { + return junk_string_value_; + } + } + char sign = '+'; + if (*current == '+' || *current == '-') { + sign = static_cast(*current); + ++current; + if (current == end) { + if (allow_trailing_junk) { + goto parsing_done; + } else { + return junk_string_value_; + } + } + } + + if (current == end || *current < '0' || *current > '9') { + if (allow_trailing_junk) { + goto parsing_done; + } else { + return junk_string_value_; + } + } + + const int max_exponent = INT_MAX / 2; + ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2); + int num = 0; + do { + // Check overflow. + int digit = *current - '0'; + if (num >= max_exponent / 10 + && !(num == max_exponent / 10 && digit <= max_exponent % 10)) { + num = max_exponent; + } else { + num = num * 10 + digit; + } + ++current; + } while (current != end && *current >= '0' && *current <= '9'); + + exponent += (sign == '-' ? 
-num : num); + } + + if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) { + return junk_string_value_; + } + if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) { + return junk_string_value_; + } + if (allow_trailing_spaces) { + AdvanceToNonspace(¤t, end); + } + + parsing_done: + exponent += insignificant_digits; + + if (octal) { + double result; + const char* tail_pointer = NULL; + result = RadixStringToIeee<3>(buffer, + buffer + buffer_pos, + sign, + allow_trailing_junk, + junk_string_value_, + read_as_double, + &tail_pointer); + ASSERT(tail_pointer != NULL); + *processed_characters_count = static_cast(current - input); + return result; + } + + if (nonzero_digit_dropped) { + buffer[buffer_pos++] = '1'; + exponent--; + } + + ASSERT(buffer_pos < kBufferSize); + buffer[buffer_pos] = '\0'; + + double converted; + if (read_as_double) { + converted = Strtod(Vector(buffer, buffer_pos), exponent); + } else { + converted = Strtof(Vector(buffer, buffer_pos), exponent); + } + *processed_characters_count = static_cast(current - input); + return sign? -converted: converted; +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/double-conversion.h b/native/iosTest/Pods/DoubleConversion/double-conversion/double-conversion.h new file mode 100644 index 000000000..1c3387d4f --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/double-conversion.h @@ -0,0 +1,536 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ +#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ + +#include "utils.h" + +namespace double_conversion { + +class DoubleToStringConverter { + public: + // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint + // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the + // function returns false. + static const int kMaxFixedDigitsBeforePoint = 60; + static const int kMaxFixedDigitsAfterPoint = 60; + + // When calling ToExponential with a requested_digits + // parameter > kMaxExponentialDigits then the function returns false. 
+ static const int kMaxExponentialDigits = 120; + + // When calling ToPrecision with a requested_digits + // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits + // then the function returns false. + static const int kMinPrecisionDigits = 1; + static const int kMaxPrecisionDigits = 120; + + enum Flags { + NO_FLAGS = 0, + EMIT_POSITIVE_EXPONENT_SIGN = 1, + EMIT_TRAILING_DECIMAL_POINT = 2, + EMIT_TRAILING_ZERO_AFTER_POINT = 4, + UNIQUE_ZERO = 8 + }; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent + // form, emits a '+' for positive exponents. Example: 1.2e+2. + // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is + // converted into decimal format then a trailing decimal point is appended. + // Example: 2345.0 is converted to "2345.". + // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point + // emits a trailing '0'-character. This flag requires the + // EXMIT_TRAILING_DECIMAL_POINT flag. + // Example: 2345.0 is converted to "2345.0". + // - UNIQUE_ZERO: "-0.0" is converted to "0.0". + // + // Infinity symbol and nan_symbol provide the string representation for these + // special values. If the string is NULL and the special value is encountered + // then the conversion functions return false. + // + // The exponent_character is used in exponential representations. It is + // usually 'e' or 'E'. + // + // When converting to the shortest representation the converter will + // represent input numbers in decimal format if they are in the interval + // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[ + // (lower boundary included, greater boundary excluded). 
+ // Example: with decimal_in_shortest_low = -6 and + // decimal_in_shortest_high = 21: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // When converting to precision mode the converter may add + // max_leading_padding_zeroes before returning the number in exponential + // format. + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarily the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. 
+ DoubleToStringConverter(int flags, + const char* infinity_symbol, + const char* nan_symbol, + char exponent_character, + int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode) + : flags_(flags), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol), + exponent_character_(exponent_character), + decimal_in_shortest_low_(decimal_in_shortest_low), + decimal_in_shortest_high_(decimal_in_shortest_high), + max_leading_padding_zeroes_in_precision_mode_( + max_leading_padding_zeroes_in_precision_mode), + max_trailing_padding_zeroes_in_precision_mode_( + max_trailing_padding_zeroes_in_precision_mode) { + // When 'trailing zero after the point' is set, then 'trailing point' + // must be set too. + ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) || + !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0)); + } + + // Returns a converter following the EcmaScript specification. + static const DoubleToStringConverter& EcmaScriptConverter(); + + // Computes the shortest string of digits that correctly represent the input + // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high + // (see constructor) it then either returns a decimal representation, or an + // exponential representation. + // Example with decimal_in_shortest_low = -6, + // decimal_in_shortest_high = 21, + // EMIT_POSITIVE_EXPONENT_SIGN activated, and + // EMIT_TRAILING_DECIMAL_POINT deactived: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // Note: the conversion may round the output if the returned string + // is accurate enough to uniquely identify the input-number. 
+ // For example the most precise representation of the double 9e59 equals + // "899999999999999918767229449717619953810131273674690656206848", but + // the converter will return the shorter (but still correct) "9e59". + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except when the input value is special and no infinity_symbol or + // nan_symbol has been given to the constructor. + bool ToShortest(double value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST); + } + + // Same as ToShortest, but for single-precision floats. + bool ToShortestSingle(float value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE); + } + + + // Computes a decimal representation with a fixed number of digits after the + // decimal point. The last emitted digit is rounded. + // + // Examples: + // ToFixed(3.12, 1) -> "3.1" + // ToFixed(3.1415, 3) -> "3.142" + // ToFixed(1234.56789, 4) -> "1234.5679" + // ToFixed(1.23, 5) -> "1.23000" + // ToFixed(0.1, 4) -> "0.1000" + // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00" + // ToFixed(0.1, 30) -> "0.100000000000000005551115123126" + // ToFixed(0.1, 17) -> "0.10000000000000001" + // + // If requested_digits equals 0, then the tail of the result depends on + // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples, for requested_digits == 0, + // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be + // - false and false: then 123.45 -> 123 + // 0.678 -> 1 + // - true and false: then 123.45 -> 123. + // 0.678 -> 1. + // - true and true: then 123.45 -> 123.0 + // 0.678 -> 1.0 + // + // Returns true if the conversion succeeds. 
The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'value' > 10^kMaxFixedDigitsBeforePoint, or + // - 'requested_digits' > kMaxFixedDigitsAfterPoint. + // The last two conditions imply that the result will never contain more than + // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters + // (one additional character for the sign, and one for the decimal point). + bool ToFixed(double value, + int requested_digits, + StringBuilder* result_builder) const; + + // Computes a representation in exponential format with requested_digits + // after the decimal point. The last emitted digit is rounded. + // If requested_digits equals -1, then the shortest exponential representation + // is computed. + // + // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and + // exponent_character set to 'e'. + // ToExponential(3.12, 1) -> "3.1e0" + // ToExponential(5.0, 3) -> "5.000e0" + // ToExponential(0.001, 2) -> "1.00e-3" + // ToExponential(3.1415, -1) -> "3.1415e0" + // ToExponential(3.1415, 4) -> "3.1415e0" + // ToExponential(3.1415, 3) -> "3.142e0" + // ToExponential(123456789000000, 3) -> "1.235e14" + // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30" + // ToExponential(1000000000000000019884624838656.0, 32) -> + // "1.00000000000000001988462483865600e30" + // ToExponential(1234, 0) -> "1e3" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'requested_digits' > kMaxExponentialDigits. 
+ // The last condition implies that the result will never contain more than + // kMaxExponentialDigits + 8 characters (the sign, the digit before the + // decimal point, the decimal point, the exponent character, the + // exponent's sign, and at most 3 exponent digits). + bool ToExponential(double value, + int requested_digits, + StringBuilder* result_builder) const; + + // Computes 'precision' leading digits of the given 'value' and returns them + // either in exponential or decimal format, depending on + // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the + // constructor). + // The last computed digit is rounded. + // + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarily the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no + // EMIT_TRAILING_ZERO_AFTER_POINT: + // ToPrecision(123450.0, 6) -> "123450" + // ToPrecision(123450.0, 5) -> "123450" + // ToPrecision(123450.0, 4) -> "123500" + // ToPrecision(123450.0, 3) -> "123000" + // ToPrecision(123450.0, 2) -> "1.2e5" + // + // Returns true if the conversion succeeds. 
The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - precision < kMinPericisionDigits + // - precision > kMaxPrecisionDigits + // The last condition implies that the result will never contain more than + // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the + // exponent character, the exponent's sign, and at most 3 exponent digits). + bool ToPrecision(double value, + int precision, + StringBuilder* result_builder) const; + + enum DtoaMode { + // Produce the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate + // but correct) 0.3. + SHORTEST, + // Same as SHORTEST, but for single-precision floats. + SHORTEST_SINGLE, + // Produce a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + FIXED, + // Fixed number of digits (independent of the decimal point). + PRECISION + }; + + // The maximal number of digits that are needed to emit a double in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any double will never use more digits than + // kBase10MaximalLength. + // Note that DoubleToAscii null-terminates its input. So the given buffer + // should be at least kBase10MaximalLength + 1 characters long. + static const int kBase10MaximalLength = 17; + + // Converts the given double 'v' to ascii. 'v' must not be NaN, +Infinity, or + // -Infinity. In SHORTEST_SINGLE-mode this restriction also applies to 'v' + // after it has been casted to a single-precision float. That is, in this + // mode static_cast(v) must not be NaN, +Infinity or -Infinity. + // + // The result should be interpreted as buffer * 10^(point-length). 
+ // + // The output depends on the given mode: + // - SHORTEST: produce the least amount of digits for which the internal + // identity requirement is still satisfied. If the digits are printed + // (together with the correct exponent) then reading this number will give + // 'v' again. The buffer will choose the representation that is closest to + // 'v'. If there are two at the same distance, than the one farther away + // from 0 is chosen (halfway cases - ending with 5 - are rounded up). + // In this mode the 'requested_digits' parameter is ignored. + // - SHORTEST_SINGLE: same as SHORTEST but with single-precision. + // - FIXED: produces digits necessary to print a given number with + // 'requested_digits' digits after the decimal point. The produced digits + // might be too short in which case the caller has to fill the remainder + // with '0's. + // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. + // Halfway cases are rounded towards +/-Infinity (away from 0). The call + // toFixed(0.15, 2) thus returns buffer="2", point=0. + // The returned buffer may contain digits that would be truncated from the + // shortest representation of the input. + // - PRECISION: produces 'requested_digits' where the first digit is not '0'. + // Even though the length of produced digits usually equals + // 'requested_digits', the function is allowed to return fewer digits, in + // which case the caller has to fill the missing digits with '0's. + // Halfway cases are again rounded away from 0. + // DoubleToAscii expects the given buffer to be big enough to hold all + // digits and a terminating null-character. In SHORTEST-mode it expects a + // buffer of at least kBase10MaximalLength + 1. In all other modes the + // requested_digits parameter and the padding-zeroes limit the size of the + // output. Don't forget the decimal point, the exponent character and the + // terminating null-character when computing the maximal output size. 
+ // The given length is only used in debug mode to ensure the buffer is big + // enough. + static void DoubleToAscii(double v, + DtoaMode mode, + int requested_digits, + char* buffer, + int buffer_length, + bool* sign, + int* length, + int* point); + + private: + // Implementation for ToShortest and ToShortestSingle. + bool ToShortestIeeeNumber(double value, + StringBuilder* result_builder, + DtoaMode mode) const; + + // If the value is a special value (NaN or Infinity) constructs the + // corresponding string using the configured infinity/nan-symbol. + // If either of them is NULL or the value is not special then the + // function returns false. + bool HandleSpecialValues(double value, StringBuilder* result_builder) const; + // Constructs an exponential representation (i.e. 1.234e56). + // The given exponent assumes a decimal point after the first decimal digit. + void CreateExponentialRepresentation(const char* decimal_digits, + int length, + int exponent, + StringBuilder* result_builder) const; + // Creates a decimal representation (i.e 1234.5678). + void CreateDecimalRepresentation(const char* decimal_digits, + int length, + int decimal_point, + int digits_after_point, + StringBuilder* result_builder) const; + + const int flags_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + const char exponent_character_; + const int decimal_in_shortest_low_; + const int decimal_in_shortest_high_; + const int max_leading_padding_zeroes_in_precision_mode_; + const int max_trailing_padding_zeroes_in_precision_mode_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter); +}; + + +class StringToDoubleConverter { + public: + // Enumeration for allowing octals and ignoring junk when converting + // strings to numbers. 
+ enum Flags { + NO_FLAGS = 0, + ALLOW_HEX = 1, + ALLOW_OCTALS = 2, + ALLOW_TRAILING_JUNK = 4, + ALLOW_LEADING_SPACES = 8, + ALLOW_TRAILING_SPACES = 16, + ALLOW_SPACES_AFTER_SIGN = 32 + }; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers. + // Ex: StringToDouble("0x1234") -> 4660.0 + // In StringToDouble("0x1234.56") the characters ".56" are trailing + // junk. The result of the call is hence dependent on + // the ALLOW_TRAILING_JUNK flag and/or the junk value. + // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK, + // the string will not be parsed as "0" followed by junk. + // + // - ALLOW_OCTALS: recognizes the prefix "0" for octals: + // If a sequence of octal digits starts with '0', then the number is + // read as octal integer. Octal numbers may only be integers. + // Ex: StringToDouble("01234") -> 668.0 + // StringToDouble("012349") -> 12349.0 // Not a sequence of octal + // // digits. + // In StringToDouble("01234.56") the characters ".56" are trailing + // junk. The result of the call is hence dependent on + // the ALLOW_TRAILING_JUNK flag and/or the junk value. + // In StringToDouble("01234e56") the characters "e56" are trailing + // junk, too. + // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of + // a double literal. + // - ALLOW_LEADING_SPACES: skip over leading spaces. + // - ALLOW_TRAILING_SPACES: ignore trailing spaces. + // - ALLOW_SPACES_AFTER_SIGN: ignore spaces after the sign. + // Ex: StringToDouble("- 123.2") -> -123.2. + // StringToDouble("+ 123.2") -> 123.2 + // + // empty_string_value is returned when an empty string is given as input. + // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string + // containing only spaces is converted to the 'empty_string_value', too. 
+ // + // junk_string_value is returned when + // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not + // part of a double-literal) is found. + // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a + // double literal. + // + // infinity_symbol and nan_symbol are strings that are used to detect + // inputs that represent infinity and NaN. They can be null, in which case + // they are ignored. + // The conversion routine first reads any possible signs. Then it compares the + // following character of the input-string with the first character of + // the infinity, and nan-symbol. If either matches, the function assumes, that + // a match has been found, and expects the following input characters to match + // the remaining characters of the special-value symbol. + // This means that the following restrictions apply to special-value symbols: + // - they must not start with signs ('+', or '-'), + // - they must not have the same first character. + // - they must not start with digits. + // + // Examples: + // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK, + // empty_string_value = 0.0, + // junk_string_value = NaN, + // infinity_symbol = "infinity", + // nan_symbol = "nan": + // StringToDouble("0x1234") -> 4660.0. + // StringToDouble("0x1234K") -> 4660.0. + // StringToDouble("") -> 0.0 // empty_string_value. + // StringToDouble(" ") -> NaN // junk_string_value. + // StringToDouble(" 1") -> NaN // junk_string_value. + // StringToDouble("0x") -> NaN // junk_string_value. + // StringToDouble("-123.45") -> -123.45. + // StringToDouble("--123.45") -> NaN // junk_string_value. + // StringToDouble("123e45") -> 123e45. + // StringToDouble("123E45") -> 123e45. + // StringToDouble("123e+45") -> 123e45. + // StringToDouble("123E-45") -> 123e-45. + // StringToDouble("123e") -> 123.0 // trailing junk ignored. + // StringToDouble("123e-") -> 123.0 // trailing junk ignored. + // StringToDouble("+NaN") -> NaN // NaN string literal. 
+ // StringToDouble("-infinity") -> -inf. // infinity literal. + // StringToDouble("Infinity") -> NaN // junk_string_value. + // + // flags = ALLOW_OCTAL | ALLOW_LEADING_SPACES, + // empty_string_value = 0.0, + // junk_string_value = NaN, + // infinity_symbol = NULL, + // nan_symbol = NULL: + // StringToDouble("0x1234") -> NaN // junk_string_value. + // StringToDouble("01234") -> 668.0. + // StringToDouble("") -> 0.0 // empty_string_value. + // StringToDouble(" ") -> 0.0 // empty_string_value. + // StringToDouble(" 1") -> 1.0 + // StringToDouble("0x") -> NaN // junk_string_value. + // StringToDouble("0123e45") -> NaN // junk_string_value. + // StringToDouble("01239E45") -> 1239e45. + // StringToDouble("-infinity") -> NaN // junk_string_value. + // StringToDouble("NaN") -> NaN // junk_string_value. + StringToDoubleConverter(int flags, + double empty_string_value, + double junk_string_value, + const char* infinity_symbol, + const char* nan_symbol) + : flags_(flags), + empty_string_value_(empty_string_value), + junk_string_value_(junk_string_value), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol) { + } + + // Performs the conversion. + // The output parameter 'processed_characters_count' is set to the number + // of characters that have been processed to read the number. + // Spaces than are processed with ALLOW_{LEADING|TRAILING}_SPACES are included + // in the 'processed_characters_count'. Trailing junk is never included. + double StringToDouble(const char* buffer, + int length, + int* processed_characters_count) const { + return StringToIeee(buffer, length, processed_characters_count, true); + } + + // Same as StringToDouble but reads a float. + // Note that this is not equivalent to static_cast(StringToDouble(...)) + // due to potential double-rounding. 
+ float StringToFloat(const char* buffer, + int length, + int* processed_characters_count) const { + return static_cast(StringToIeee(buffer, length, + processed_characters_count, false)); + } + + private: + const int flags_; + const double empty_string_value_; + const double junk_string_value_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + + double StringToIeee(const char* buffer, + int length, + int* processed_characters_count, + bool read_as_double) const; + + DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter); +}; + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/fast-dtoa.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/fast-dtoa.cc new file mode 100644 index 000000000..61350383a --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/fast-dtoa.cc @@ -0,0 +1,665 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "fast-dtoa.h" + +#include "cached-powers.h" +#include "diy-fp.h" +#include "ieee.h" + +namespace double_conversion { + +// The minimal and maximal target exponent define the range of w's binary +// exponent, where 'w' is the result of multiplying the input by a cached power +// of ten. +// +// A different range might be chosen on a different platform, to optimize digit +// generation, but a smaller range requires more powers of ten to be cached. +static const int kMinimalTargetExponent = -60; +static const int kMaximalTargetExponent = -32; + + +// Adjusts the last digit of the generated number, and screens out generated +// solutions that may be inaccurate. A solution may be inaccurate if it is +// outside the safe interval, or if we cannot prove that it is closer to the +// input than a neighboring representation of the same length. +// +// Input: * buffer containing the digits of too_high / 10^kappa +// * the buffer's length +// * distance_too_high_w == (too_high - w).f() * unit +// * unsafe_interval == (too_high - too_low).f() * unit +// * rest = (too_high - buffer * 10^kappa).f() * unit +// * ten_kappa = 10^kappa * unit +// * unit = the common multiplier +// Output: returns true if the buffer is guaranteed to contain the closest +// representable number to the input. +// Modifies the generated digits in the buffer to approach (round towards) w. 
+static bool RoundWeed(Vector buffer, + int length, + uint64_t distance_too_high_w, + uint64_t unsafe_interval, + uint64_t rest, + uint64_t ten_kappa, + uint64_t unit) { + uint64_t small_distance = distance_too_high_w - unit; + uint64_t big_distance = distance_too_high_w + unit; + // Let w_low = too_high - big_distance, and + // w_high = too_high - small_distance. + // Note: w_low < w < w_high + // + // The real w (* unit) must lie somewhere inside the interval + // ]w_low; w_high[ (often written as "(w_low; w_high)") + + // Basically the buffer currently contains a number in the unsafe interval + // ]too_low; too_high[ with too_low < w < too_high + // + // too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // ^v 1 unit ^ ^ ^ ^ + // boundary_high --------------------- . . . . + // ^v 1 unit . . . . + // - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . . + // . . ^ . . + // . big_distance . . . + // . . . . rest + // small_distance . . . . + // v . . . . + // w_high - - - - - - - - - - - - - - - - - - . . . . + // ^v 1 unit . . . . + // w ---------------------------------------- . . . . + // ^v 1 unit v . . . + // w_low - - - - - - - - - - - - - - - - - - - - - . . . + // . . v + // buffer --------------------------------------------------+-------+-------- + // . . + // safe_interval . + // v . + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - . + // ^v 1 unit . + // boundary_low ------------------------- unsafe_interval + // ^v 1 unit v + // too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // + // + // Note that the value of buffer could lie anywhere inside the range too_low + // to too_high. + // + // boundary_low, boundary_high and w are approximations of the real boundaries + // and v (the input number). They are guaranteed to be precise up to one unit. + // In fact the error is guaranteed to be strictly less than one unit. 
+ // + // Anything that lies outside the unsafe interval is guaranteed not to round + // to v when read again. + // Anything that lies inside the safe interval is guaranteed to round to v + // when read again. + // If the number inside the buffer lies inside the unsafe interval but not + // inside the safe interval then we simply do not know and bail out (returning + // false). + // + // Similarly we have to take into account the imprecision of 'w' when finding + // the closest representation of 'w'. If we have two potential + // representations, and one is closer to both w_low and w_high, then we know + // it is closer to the actual value v. + // + // By generating the digits of too_high we got the largest (closest to + // too_high) buffer that is still in the unsafe interval. In the case where + // w_high < buffer < too_high we try to decrement the buffer. + // This way the buffer approaches (rounds towards) w. + // There are 3 conditions that stop the decrementation process: + // 1) the buffer is already below w_high + // 2) decrementing the buffer would make it leave the unsafe interval + // 3) decrementing the buffer would yield a number below w_high and farther + // away than the current number. In other words: + // (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high + // Instead of using the buffer directly we use its distance to too_high. + // Conceptually rest ~= too_high - buffer + // We need to do the following tests in this order to avoid over- and + // underflows. + ASSERT(rest <= unsafe_interval); + while (rest < small_distance && // Negated condition 1 + unsafe_interval - rest >= ten_kappa && // Negated condition 2 + (rest + ten_kappa < small_distance || // buffer{-1} > w_high + small_distance - rest >= rest + ten_kappa - small_distance)) { + buffer[length - 1]--; + rest += ten_kappa; + } + + // We have approached w+ as much as possible. We now test if approaching w- + // would require changing the buffer. 
If yes, then we have two possible + // representations close to w, but we cannot decide which one is closer. + if (rest < big_distance && + unsafe_interval - rest >= ten_kappa && + (rest + ten_kappa < big_distance || + big_distance - rest > rest + ten_kappa - big_distance)) { + return false; + } + + // Weeding test. + // The safe interval is [too_low + 2 ulp; too_high - 2 ulp] + // Since too_low = too_high - unsafe_interval this is equivalent to + // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp] + // Conceptually we have: rest ~= too_high - buffer + return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit); +} + + +// Rounds the buffer upwards if the result is closer to v by possibly adding +// 1 to the buffer. If the precision of the calculation is not sufficient to +// round correctly, return false. +// The rounding might shift the whole buffer in which case the kappa is +// adjusted. For example "99", kappa = 3 might become "10", kappa = 4. +// +// If 2*rest > ten_kappa then the buffer needs to be round up. +// rest can have an error of +/- 1 unit. This function accounts for the +// imprecision and returns false, if the rounding direction cannot be +// unambiguously determined. +// +// Precondition: rest < ten_kappa. +static bool RoundWeedCounted(Vector buffer, + int length, + uint64_t rest, + uint64_t ten_kappa, + uint64_t unit, + int* kappa) { + ASSERT(rest < ten_kappa); + // The following tests are done in a specific order to avoid overflows. They + // will work correctly with any uint64 values of rest < ten_kappa and unit. + // + // If the unit is too big, then we don't know which way to round. For example + // a unit of 50 means that the real number lies within rest +/- 50. If + // 10^kappa == 40 then there is no way to tell which way to round. + if (unit >= ten_kappa) return false; + // Even if unit is just half the size of 10^kappa we are already completely + // lost. 
(And after the previous test we know that the expression will not + // over/underflow.) + if (ten_kappa - unit <= unit) return false; + // If 2 * (rest + unit) <= 10^kappa we can safely round down. + if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) { + return true; + } + // If 2 * (rest - unit) >= 10^kappa, then we can safely round up. + if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) { + // Increment the last digit recursively until we find a non '9' digit. + buffer[length - 1]++; + for (int i = length - 1; i > 0; --i) { + if (buffer[i] != '0' + 10) break; + buffer[i] = '0'; + buffer[i - 1]++; + } + // If the first digit is now '0'+ 10 we had a buffer with all '9's. With the + // exception of the first digit all digits are now '0'. Simply switch the + // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and + // the power (the kappa) is increased. + if (buffer[0] == '0' + 10) { + buffer[0] = '1'; + (*kappa) += 1; + } + return true; + } + return false; +} + +// Returns the biggest power of ten that is less than or equal to the given +// number. We furthermore receive the maximum number of bits 'number' has. +// +// Returns power == 10^(exponent_plus_one-1) such that +// power <= number < power * 10. +// If number_bits == 0 then 0^(0-1) is returned. +// The number of bits must be <= 32. +// Precondition: number < (1 << (number_bits + 1)). + +// Inspired by the method for finding an integer log base 10 from here: +// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10 +static unsigned int const kSmallPowersOfTen[] = + {0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, + 1000000000}; + +static void BiggestPowerTen(uint32_t number, + int number_bits, + uint32_t* power, + int* exponent_plus_one) { + ASSERT(number < (1u << (number_bits + 1))); + // 1233/4096 is approximately 1/lg(10). 
+ int exponent_plus_one_guess = ((number_bits + 1) * 1233 >> 12); + // We increment to skip over the first entry in the kPowersOf10 table. + // Note: kPowersOf10[i] == 10^(i-1). + exponent_plus_one_guess++; + // We don't have any guarantees that 2^number_bits <= number. + if (number < kSmallPowersOfTen[exponent_plus_one_guess]) { + exponent_plus_one_guess--; + } + *power = kSmallPowersOfTen[exponent_plus_one_guess]; + *exponent_plus_one = exponent_plus_one_guess; +} + +// Generates the digits of input number w. +// w is a floating-point number (DiyFp), consisting of a significand and an +// exponent. Its exponent is bounded by kMinimalTargetExponent and +// kMaximalTargetExponent. +// Hence -60 <= w.e() <= -32. +// +// Returns false if it fails, in which case the generated digits in the buffer +// should not be used. +// Preconditions: +// * low, w and high are correct up to 1 ulp (unit in the last place). That +// is, their error must be less than a unit of their last digits. +// * low.e() == w.e() == high.e() +// * low < w < high, and taking into account their error: low~ <= high~ +// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent +// Postconditions: returns false if procedure fails. +// otherwise: +// * buffer is not null-terminated, but len contains the number of digits. +// * buffer contains the shortest possible decimal digit-sequence +// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the +// correct values of low and high (without their error). +// * if more than one decimal representation gives the minimal number of +// decimal digits then the one closest to W (where W is the correct value +// of w) is chosen. +// Remark: this procedure takes into account the imprecision of its input +// numbers. If the precision is not enough to guarantee all the postconditions +// then false is returned. This usually happens rarely (~0.5%). 
+// +// Say, for the sake of example, that +// w.e() == -48, and w.f() == 0x1234567890abcdef +// w's value can be computed by w.f() * 2^w.e() +// We can obtain w's integral digits by simply shifting w.f() by -w.e(). +// -> w's integral part is 0x1234 +// w's fractional part is therefore 0x567890abcdef. +// Printing w's integral part is easy (simply print 0x1234 in decimal). +// In order to print its fraction we repeatedly multiply the fraction by 10 and +// get each digit. Example the first digit after the point would be computed by +// (0x567890abcdef * 10) >> 48. -> 3 +// The whole thing becomes slightly more complicated because we want to stop +// once we have enough digits. That is, once the digits inside the buffer +// represent 'w' we can stop. Everything inside the interval low - high +// represents w. However we have to pay attention to low, high and w's +// imprecision. +static bool DigitGen(DiyFp low, + DiyFp w, + DiyFp high, + Vector buffer, + int* length, + int* kappa) { + ASSERT(low.e() == w.e() && w.e() == high.e()); + ASSERT(low.f() + 1 <= high.f() - 1); + ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent); + // low, w and high are imprecise, but by less than one ulp (unit in the last + // place). + // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that + // the new numbers are outside of the interval we want the final + // representation to lie in. + // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield + // numbers that are certain to lie in the interval. We will use this fact + // later on. + // We will now start by generating the digits within the uncertain + // interval. Later we will weed out representations that lie outside the safe + // interval and thus _might_ lie outside the correct interval. 
+ uint64_t unit = 1; + DiyFp too_low = DiyFp(low.f() - unit, low.e()); + DiyFp too_high = DiyFp(high.f() + unit, high.e()); + // too_low and too_high are guaranteed to lie outside the interval we want the + // generated number in. + DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low); + // We now cut the input number into two parts: the integral digits and the + // fractionals. We will not write any decimal separator though, but adapt + // kappa instead. + // Reminder: we are currently computing the digits (stored inside the buffer) + // such that: too_low < buffer * 10^kappa < too_high + // We use too_high for the digit_generation and stop as soon as possible. + // If we stop early we effectively round down. + DiyFp one = DiyFp(static_cast(1) << -w.e(), w.e()); + // Division by one is a shift. + uint32_t integrals = static_cast(too_high.f() >> -one.e()); + // Modulo by one is an and. + uint64_t fractionals = too_high.f() & (one.f() - 1); + uint32_t divisor; + int divisor_exponent_plus_one; + BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), + &divisor, &divisor_exponent_plus_one); + *kappa = divisor_exponent_plus_one; + *length = 0; + // Loop invariant: buffer = too_high / 10^kappa (integer division) + // The invariant holds for the first iteration: kappa has been initialized + // with the divisor exponent + 1. And the divisor is the biggest power of ten + // that is smaller than integrals. + while (*kappa > 0) { + int digit = integrals / divisor; + ASSERT(digit <= 9); + buffer[*length] = static_cast('0' + digit); + (*length)++; + integrals %= divisor; + (*kappa)--; + // Note that kappa now equals the exponent of the divisor and that the + // invariant thus holds again. 
+ uint64_t rest = + (static_cast(integrals) << -one.e()) + fractionals; + // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e()) + // Reminder: unsafe_interval.e() == one.e() + if (rest < unsafe_interval.f()) { + // Rounding down (by not emitting the remaining digits) yields a number + // that lies within the unsafe interval. + return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(), + unsafe_interval.f(), rest, + static_cast(divisor) << -one.e(), unit); + } + divisor /= 10; + } + + // The integrals have been generated. We are at the point of the decimal + // separator. In the following loop we simply multiply the remaining digits by + // 10 and divide by one. We just need to pay attention to multiply associated + // data (like the interval or 'unit'), too. + // Note that the multiplication by 10 does not overflow, because w.e >= -60 + // and thus one.e >= -60. + ASSERT(one.e() >= -60); + ASSERT(fractionals < one.f()); + ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f()); + for (;;) { + fractionals *= 10; + unit *= 10; + unsafe_interval.set_f(unsafe_interval.f() * 10); + // Integer division by one. + int digit = static_cast(fractionals >> -one.e()); + ASSERT(digit <= 9); + buffer[*length] = static_cast('0' + digit); + (*length)++; + fractionals &= one.f() - 1; // Modulo by one. + (*kappa)--; + if (fractionals < unsafe_interval.f()) { + return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit, + unsafe_interval.f(), fractionals, one.f(), unit); + } + } +} + + + +// Generates (at most) requested_digits digits of input number w. +// w is a floating-point number (DiyFp), consisting of a significand and an +// exponent. Its exponent is bounded by kMinimalTargetExponent and +// kMaximalTargetExponent. +// Hence -60 <= w.e() <= -32. +// +// Returns false if it fails, in which case the generated digits in the buffer +// should not be used. +// Preconditions: +// * w is correct up to 1 ulp (unit in the last place). 
That +// is, its error must be strictly less than a unit of its last digit. +// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent +// +// Postconditions: returns false if procedure fails. +// otherwise: +// * buffer is not null-terminated, but length contains the number of +// digits. +// * the representation in buffer is the most precise representation of +// requested_digits digits. +// * buffer contains at most requested_digits digits of w. If there are less +// than requested_digits digits then some trailing '0's have been removed. +// * kappa is such that +// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2. +// +// Remark: This procedure takes into account the imprecision of its input +// numbers. If the precision is not enough to guarantee all the postconditions +// then false is returned. This usually happens rarely, but the failure-rate +// increases with higher requested_digits. +static bool DigitGenCounted(DiyFp w, + int requested_digits, + Vector buffer, + int* length, + int* kappa) { + ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent); + ASSERT(kMinimalTargetExponent >= -60); + ASSERT(kMaximalTargetExponent <= -32); + // w is assumed to have an error less than 1 unit. Whenever w is scaled we + // also scale its error. + uint64_t w_error = 1; + // We cut the input number into two parts: the integral digits and the + // fractional digits. We don't emit any decimal separator, but adapt kappa + // instead. Example: instead of writing "1.2" we put "12" into the buffer and + // increase kappa by 1. + DiyFp one = DiyFp(static_cast(1) << -w.e(), w.e()); + // Division by one is a shift. + uint32_t integrals = static_cast(w.f() >> -one.e()); + // Modulo by one is an and. 
+ uint64_t fractionals = w.f() & (one.f() - 1); + uint32_t divisor; + int divisor_exponent_plus_one; + BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), + &divisor, &divisor_exponent_plus_one); + *kappa = divisor_exponent_plus_one; + *length = 0; + + // Loop invariant: buffer = w / 10^kappa (integer division) + // The invariant holds for the first iteration: kappa has been initialized + // with the divisor exponent + 1. And the divisor is the biggest power of ten + // that is smaller than 'integrals'. + while (*kappa > 0) { + int digit = integrals / divisor; + ASSERT(digit <= 9); + buffer[*length] = static_cast('0' + digit); + (*length)++; + requested_digits--; + integrals %= divisor; + (*kappa)--; + // Note that kappa now equals the exponent of the divisor and that the + // invariant thus holds again. + if (requested_digits == 0) break; + divisor /= 10; + } + + if (requested_digits == 0) { + uint64_t rest = + (static_cast(integrals) << -one.e()) + fractionals; + return RoundWeedCounted(buffer, *length, rest, + static_cast(divisor) << -one.e(), w_error, + kappa); + } + + // The integrals have been generated. We are at the point of the decimal + // separator. In the following loop we simply multiply the remaining digits by + // 10 and divide by one. We just need to pay attention to multiply associated + // data (the 'unit'), too. + // Note that the multiplication by 10 does not overflow, because w.e >= -60 + // and thus one.e >= -60. + ASSERT(one.e() >= -60); + ASSERT(fractionals < one.f()); + ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f()); + while (requested_digits > 0 && fractionals > w_error) { + fractionals *= 10; + w_error *= 10; + // Integer division by one. + int digit = static_cast(fractionals >> -one.e()); + ASSERT(digit <= 9); + buffer[*length] = static_cast('0' + digit); + (*length)++; + requested_digits--; + fractionals &= one.f() - 1; // Modulo by one. 
+ (*kappa)--; + } + if (requested_digits != 0) return false; + return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error, + kappa); +} + + +// Provides a decimal representation of v. +// Returns true if it succeeds, otherwise the result cannot be trusted. +// There will be *length digits inside the buffer (not null-terminated). +// If the function returns true then +// v == (double) (buffer * 10^decimal_exponent). +// The digits in the buffer are the shortest representation possible: no +// 0.09999999999999999 instead of 0.1. The shorter representation will even be +// chosen even if the longer one would be closer to v. +// The last digit will be closest to the actual v. That is, even if several +// digits might correctly yield 'v' when read again, the closest will be +// computed. +static bool Grisu3(double v, + FastDtoaMode mode, + Vector buffer, + int* length, + int* decimal_exponent) { + DiyFp w = Double(v).AsNormalizedDiyFp(); + // boundary_minus and boundary_plus are the boundaries between v and its + // closest floating-point neighbors. Any number strictly between + // boundary_minus and boundary_plus will round to v when convert to a double. + // Grisu3 will never output representations that lie exactly on a boundary. 
+ DiyFp boundary_minus, boundary_plus; + if (mode == FAST_DTOA_SHORTEST) { + Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus); + } else { + ASSERT(mode == FAST_DTOA_SHORTEST_SINGLE); + float single_v = static_cast(v); + Single(single_v).NormalizedBoundaries(&boundary_minus, &boundary_plus); + } + ASSERT(boundary_plus.e() == w.e()); + DiyFp ten_mk; // Cached power of ten: 10^-k + int mk; // -k + int ten_mk_minimal_binary_exponent = + kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize); + int ten_mk_maximal_binary_exponent = + kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize); + PowersOfTenCache::GetCachedPowerForBinaryExponentRange( + ten_mk_minimal_binary_exponent, + ten_mk_maximal_binary_exponent, + &ten_mk, &mk); + ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() + + DiyFp::kSignificandSize) && + (kMaximalTargetExponent >= w.e() + ten_mk.e() + + DiyFp::kSignificandSize)); + // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a + // 64 bit significand and ten_mk is thus only precise up to 64 bits. + + // The DiyFp::Times procedure rounds its result, and ten_mk is approximated + // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now + // off by a small amount. + // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w. + // In other words: let f = scaled_w.f() and e = scaled_w.e(), then + // (f-1) * 2^e < w*10^k < (f+1) * 2^e + DiyFp scaled_w = DiyFp::Times(w, ten_mk); + ASSERT(scaled_w.e() == + boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize); + // In theory it would be possible to avoid some recomputations by computing + // the difference between w and boundary_minus/plus (a power of 2) and to + // compute scaled_boundary_minus/plus by subtracting/adding from + // scaled_w. However the code becomes much less readable and the speed + // enhancements are not terriffic. 
+ DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk); + DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk); + + // DigitGen will generate the digits of scaled_w. Therefore we have + // v == (double) (scaled_w * 10^-mk). + // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an + // integer than it will be updated. For instance if scaled_w == 1.23 then + // the buffer will be filled with "123" und the decimal_exponent will be + // decreased by 2. + int kappa; + bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus, + buffer, length, &kappa); + *decimal_exponent = -mk + kappa; + return result; +} + + +// The "counted" version of grisu3 (see above) only generates requested_digits +// number of digits. This version does not generate the shortest representation, +// and with enough requested digits 0.1 will at some point print as 0.9999999... +// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and +// therefore the rounding strategy for halfway cases is irrelevant. +static bool Grisu3Counted(double v, + int requested_digits, + Vector buffer, + int* length, + int* decimal_exponent) { + DiyFp w = Double(v).AsNormalizedDiyFp(); + DiyFp ten_mk; // Cached power of ten: 10^-k + int mk; // -k + int ten_mk_minimal_binary_exponent = + kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize); + int ten_mk_maximal_binary_exponent = + kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize); + PowersOfTenCache::GetCachedPowerForBinaryExponentRange( + ten_mk_minimal_binary_exponent, + ten_mk_maximal_binary_exponent, + &ten_mk, &mk); + ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() + + DiyFp::kSignificandSize) && + (kMaximalTargetExponent >= w.e() + ten_mk.e() + + DiyFp::kSignificandSize)); + // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a + // 64 bit significand and ten_mk is thus only precise up to 64 bits. 
+ + // The DiyFp::Times procedure rounds its result, and ten_mk is approximated + // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now + // off by a small amount. + // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w. + // In other words: let f = scaled_w.f() and e = scaled_w.e(), then + // (f-1) * 2^e < w*10^k < (f+1) * 2^e + DiyFp scaled_w = DiyFp::Times(w, ten_mk); + + // We now have (double) (scaled_w * 10^-mk). + // DigitGen will generate the first requested_digits digits of scaled_w and + // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It + // will not always be exactly the same since DigitGenCounted only produces a + // limited number of digits.) + int kappa; + bool result = DigitGenCounted(scaled_w, requested_digits, + buffer, length, &kappa); + *decimal_exponent = -mk + kappa; + return result; +} + + +bool FastDtoa(double v, + FastDtoaMode mode, + int requested_digits, + Vector buffer, + int* length, + int* decimal_point) { + ASSERT(v > 0); + ASSERT(!Double(v).IsSpecial()); + + bool result = false; + int decimal_exponent = 0; + switch (mode) { + case FAST_DTOA_SHORTEST: + case FAST_DTOA_SHORTEST_SINGLE: + result = Grisu3(v, mode, buffer, length, &decimal_exponent); + break; + case FAST_DTOA_PRECISION: + result = Grisu3Counted(v, requested_digits, + buffer, length, &decimal_exponent); + break; + default: + UNREACHABLE(); + } + if (result) { + *decimal_point = *length + decimal_exponent; + buffer[*length] = '\0'; + } + return result; +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/fast-dtoa.h b/native/iosTest/Pods/DoubleConversion/double-conversion/fast-dtoa.h new file mode 100644 index 000000000..5f1e8eee5 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/fast-dtoa.h @@ -0,0 +1,88 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_ +#define DOUBLE_CONVERSION_FAST_DTOA_H_ + +#include "utils.h" + +namespace double_conversion { + +enum FastDtoaMode { + // Computes the shortest representation of the given input. The returned + // result will be the most accurate number of this length. Longer + // representations might be more accurate. + FAST_DTOA_SHORTEST, + // Same as FAST_DTOA_SHORTEST but for single-precision floats. 
+ FAST_DTOA_SHORTEST_SINGLE, + // Computes a representation where the precision (number of digits) is + // given as input. The precision is independent of the decimal point. + FAST_DTOA_PRECISION +}; + +// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not +// include the terminating '\0' character. +static const int kFastDtoaMaximalLength = 17; +// Same for single-precision numbers. +static const int kFastDtoaMaximalSingleLength = 9; + +// Provides a decimal representation of v. +// The result should be interpreted as buffer * 10^(point - length). +// +// Precondition: +// * v must be a strictly positive finite double. +// +// Returns true if it succeeds, otherwise the result can not be trusted. +// There will be *length digits inside the buffer followed by a null terminator. +// If the function returns true and mode equals +// - FAST_DTOA_SHORTEST, then +// the parameter requested_digits is ignored. +// The result satisfies +// v == (double) (buffer * 10^(point - length)). +// The digits in the buffer are the shortest representation possible. E.g. +// if 0.099999999999 and 0.1 represent the same double then "1" is returned +// with point = 0. +// The last digit will be closest to the actual v. That is, even if several +// digits might correctly yield 'v' when read again, the buffer will contain +// the one closest to v. +// - FAST_DTOA_PRECISION, then +// the buffer contains requested_digits digits. +// the difference v - (buffer * 10^(point-length)) is closest to zero for +// all possible representations of requested_digits digits. +// If there are two values that are equally close, then FastDtoa returns +// false. +// For both modes the buffer must be large enough to hold the result. 
+bool FastDtoa(double d, + FastDtoaMode mode, + int requested_digits, + Vector buffer, + int* length, + int* decimal_point); + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_FAST_DTOA_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/fixed-dtoa.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/fixed-dtoa.cc new file mode 100644 index 000000000..aef65fdc2 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/fixed-dtoa.cc @@ -0,0 +1,404 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "fixed-dtoa.h" +#include "ieee.h" + +namespace double_conversion { + +// Represents a 128bit type. This class should be replaced by a native type on +// platforms that support 128bit integers. +class UInt128 { + public: + UInt128() : high_bits_(0), low_bits_(0) { } + UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { } + + void Multiply(uint32_t multiplicand) { + uint64_t accumulator; + + accumulator = (low_bits_ & kMask32) * multiplicand; + uint32_t part = static_cast(accumulator & kMask32); + accumulator >>= 32; + accumulator = accumulator + (low_bits_ >> 32) * multiplicand; + low_bits_ = (accumulator << 32) + part; + accumulator >>= 32; + accumulator = accumulator + (high_bits_ & kMask32) * multiplicand; + part = static_cast(accumulator & kMask32); + accumulator >>= 32; + accumulator = accumulator + (high_bits_ >> 32) * multiplicand; + high_bits_ = (accumulator << 32) + part; + ASSERT((accumulator >> 32) == 0); + } + + void Shift(int shift_amount) { + ASSERT(-64 <= shift_amount && shift_amount <= 64); + if (shift_amount == 0) { + return; + } else if (shift_amount == -64) { + high_bits_ = low_bits_; + low_bits_ = 0; + } else if (shift_amount == 64) { + low_bits_ = high_bits_; + high_bits_ = 0; + } else if (shift_amount <= 0) { + high_bits_ <<= -shift_amount; + high_bits_ += low_bits_ >> (64 + shift_amount); + low_bits_ <<= -shift_amount; + } else { + low_bits_ >>= shift_amount; + 
low_bits_ += high_bits_ << (64 - shift_amount); + high_bits_ >>= shift_amount; + } + } + + // Modifies *this to *this MOD (2^power). + // Returns *this DIV (2^power). + int DivModPowerOf2(int power) { + if (power >= 64) { + int result = static_cast(high_bits_ >> (power - 64)); + high_bits_ -= static_cast(result) << (power - 64); + return result; + } else { + uint64_t part_low = low_bits_ >> power; + uint64_t part_high = high_bits_ << (64 - power); + int result = static_cast(part_low + part_high); + high_bits_ = 0; + low_bits_ -= part_low << power; + return result; + } + } + + bool IsZero() const { + return high_bits_ == 0 && low_bits_ == 0; + } + + int BitAt(int position) { + if (position >= 64) { + return static_cast(high_bits_ >> (position - 64)) & 1; + } else { + return static_cast(low_bits_ >> position) & 1; + } + } + + private: + static const uint64_t kMask32 = 0xFFFFFFFF; + // Value == (high_bits_ << 64) + low_bits_ + uint64_t high_bits_; + uint64_t low_bits_; +}; + + +static const int kDoubleSignificandSize = 53; // Includes the hidden bit. + + +static void FillDigits32FixedLength(uint32_t number, int requested_length, + Vector buffer, int* length) { + for (int i = requested_length - 1; i >= 0; --i) { + buffer[(*length) + i] = '0' + number % 10; + number /= 10; + } + *length += requested_length; +} + + +static void FillDigits32(uint32_t number, Vector buffer, int* length) { + int number_length = 0; + // We fill the digits in reverse order and exchange them afterwards. + while (number != 0) { + int digit = number % 10; + number /= 10; + buffer[(*length) + number_length] = static_cast('0' + digit); + number_length++; + } + // Exchange the digits. 
+ int i = *length; + int j = *length + number_length - 1; + while (i < j) { + char tmp = buffer[i]; + buffer[i] = buffer[j]; + buffer[j] = tmp; + i++; + j--; + } + *length += number_length; +} + + +static void FillDigits64FixedLength(uint64_t number, + Vector buffer, int* length) { + const uint32_t kTen7 = 10000000; + // For efficiency cut the number into 3 uint32_t parts, and print those. + uint32_t part2 = static_cast(number % kTen7); + number /= kTen7; + uint32_t part1 = static_cast(number % kTen7); + uint32_t part0 = static_cast(number / kTen7); + + FillDigits32FixedLength(part0, 3, buffer, length); + FillDigits32FixedLength(part1, 7, buffer, length); + FillDigits32FixedLength(part2, 7, buffer, length); +} + + +static void FillDigits64(uint64_t number, Vector buffer, int* length) { + const uint32_t kTen7 = 10000000; + // For efficiency cut the number into 3 uint32_t parts, and print those. + uint32_t part2 = static_cast(number % kTen7); + number /= kTen7; + uint32_t part1 = static_cast(number % kTen7); + uint32_t part0 = static_cast(number / kTen7); + + if (part0 != 0) { + FillDigits32(part0, buffer, length); + FillDigits32FixedLength(part1, 7, buffer, length); + FillDigits32FixedLength(part2, 7, buffer, length); + } else if (part1 != 0) { + FillDigits32(part1, buffer, length); + FillDigits32FixedLength(part2, 7, buffer, length); + } else { + FillDigits32(part2, buffer, length); + } +} + + +static void RoundUp(Vector buffer, int* length, int* decimal_point) { + // An empty buffer represents 0. + if (*length == 0) { + buffer[0] = '1'; + *decimal_point = 1; + *length = 1; + return; + } + // Round the last digit until we either have a digit that was not '9' or until + // we reached the first digit. + buffer[(*length) - 1]++; + for (int i = (*length) - 1; i > 0; --i) { + if (buffer[i] != '0' + 10) { + return; + } + buffer[i] = '0'; + buffer[i - 1]++; + } + // If the first digit is now '0' + 10, we would need to set it to '0' and add + // a '1' in front. 
However we reach the first digit only if all following + // digits had been '9' before rounding up. Now all trailing digits are '0' and + // we simply switch the first digit to '1' and update the decimal-point + // (indicating that the point is now one digit to the right). + if (buffer[0] == '0' + 10) { + buffer[0] = '1'; + (*decimal_point)++; + } +} + + +// The given fractionals number represents a fixed-point number with binary +// point at bit (-exponent). +// Preconditions: +// -128 <= exponent <= 0. +// 0 <= fractionals * 2^exponent < 1 +// The buffer holds the result. +// The function will round its result. During the rounding-process digits not +// generated by this function might be updated, and the decimal-point variable +// might be updated. If this function generates the digits 99 and the buffer +// already contained "199" (thus yielding a buffer of "19999") then a +// rounding-up will change the contents of the buffer to "20000". +static void FillFractionals(uint64_t fractionals, int exponent, + int fractional_count, Vector buffer, + int* length, int* decimal_point) { + ASSERT(-128 <= exponent && exponent <= 0); + // 'fractionals' is a fixed-point number, with binary point at bit + // (-exponent). Inside the function the non-converted remainder of fractionals + // is a fixed-point number, with binary point at bit 'point'. + if (-exponent <= 64) { + // One 64 bit number is sufficient. + ASSERT(fractionals >> 56 == 0); + int point = -exponent; + for (int i = 0; i < fractional_count; ++i) { + if (fractionals == 0) break; + // Instead of multiplying by 10 we multiply by 5 and adjust the point + // location. This way the fractionals variable will not overflow. + // Invariant at the beginning of the loop: fractionals < 2^point. + // Initially we have: point <= 64 and fractionals < 2^56 + // After each iteration the point is decremented by one. + // Note that 5^3 = 125 < 128 = 2^7. 
+ // Therefore three iterations of this loop will not overflow fractionals + // (even without the subtraction at the end of the loop body). At this + // time point will satisfy point <= 61 and therefore fractionals < 2^point + // and any further multiplication of fractionals by 5 will not overflow. + fractionals *= 5; + point--; + int digit = static_cast(fractionals >> point); + ASSERT(digit <= 9); + buffer[*length] = static_cast('0' + digit); + (*length)++; + fractionals -= static_cast(digit) << point; + } + // If the first bit after the point is set we have to round up. + if (((fractionals >> (point - 1)) & 1) == 1) { + RoundUp(buffer, length, decimal_point); + } + } else { // We need 128 bits. + ASSERT(64 < -exponent && -exponent <= 128); + UInt128 fractionals128 = UInt128(fractionals, 0); + fractionals128.Shift(-exponent - 64); + int point = 128; + for (int i = 0; i < fractional_count; ++i) { + if (fractionals128.IsZero()) break; + // As before: instead of multiplying by 10 we multiply by 5 and adjust the + // point location. + // This multiplication will not overflow for the same reasons as before. + fractionals128.Multiply(5); + point--; + int digit = fractionals128.DivModPowerOf2(point); + ASSERT(digit <= 9); + buffer[*length] = static_cast('0' + digit); + (*length)++; + } + if (fractionals128.BitAt(point - 1) == 1) { + RoundUp(buffer, length, decimal_point); + } + } +} + + +// Removes leading and trailing zeros. +// If leading zeros are removed then the decimal point position is adjusted. 
+static void TrimZeros(Vector buffer, int* length, int* decimal_point) { + while (*length > 0 && buffer[(*length) - 1] == '0') { + (*length)--; + } + int first_non_zero = 0; + while (first_non_zero < *length && buffer[first_non_zero] == '0') { + first_non_zero++; + } + if (first_non_zero != 0) { + for (int i = first_non_zero; i < *length; ++i) { + buffer[i - first_non_zero] = buffer[i]; + } + *length -= first_non_zero; + *decimal_point -= first_non_zero; + } +} + + +bool FastFixedDtoa(double v, + int fractional_count, + Vector buffer, + int* length, + int* decimal_point) { + const uint32_t kMaxUInt32 = 0xFFFFFFFF; + uint64_t significand = Double(v).Significand(); + int exponent = Double(v).Exponent(); + // v = significand * 2^exponent (with significand a 53bit integer). + // If the exponent is larger than 20 (i.e. we may have a 73bit number) then we + // don't know how to compute the representation. 2^73 ~= 9.5*10^21. + // If necessary this limit could probably be increased, but we don't need + // more. + if (exponent > 20) return false; + if (fractional_count > 20) return false; + *length = 0; + // At most kDoubleSignificandSize bits of the significand are non-zero. + // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero + // bits: 0..11*..0xxx..53*..xx + if (exponent + kDoubleSignificandSize > 64) { + // The exponent must be > 11. + // + // We know that v = significand * 2^exponent. + // And the exponent > 11. + // We simplify the task by dividing v by 10^17. + // The quotient delivers the first digits, and the remainder fits into a 64 + // bit number. + // Dividing by 10^17 is equivalent to dividing by 5^17*2^17. + const uint64_t kFive17 = UINT64_2PART_C(0xB1, A2BC2EC5); // 5^17 + uint64_t divisor = kFive17; + int divisor_power = 17; + uint64_t dividend = significand; + uint32_t quotient; + uint64_t remainder; + // Let v = f * 2^e with f == significand and e == exponent. 
+ // Then need q (quotient) and r (remainder) as follows: + // v = q * 10^17 + r + // f * 2^e = q * 10^17 + r + // f * 2^e = q * 5^17 * 2^17 + r + // If e > 17 then + // f * 2^(e-17) = q * 5^17 + r/2^17 + // else + // f = q * 5^17 * 2^(17-e) + r/2^e + if (exponent > divisor_power) { + // We only allow exponents of up to 20 and therefore (17 - e) <= 3 + dividend <<= exponent - divisor_power; + quotient = static_cast(dividend / divisor); + remainder = (dividend % divisor) << divisor_power; + } else { + divisor <<= divisor_power - exponent; + quotient = static_cast(dividend / divisor); + remainder = (dividend % divisor) << exponent; + } + FillDigits32(quotient, buffer, length); + FillDigits64FixedLength(remainder, buffer, length); + *decimal_point = *length; + } else if (exponent >= 0) { + // 0 <= exponent <= 11 + significand <<= exponent; + FillDigits64(significand, buffer, length); + *decimal_point = *length; + } else if (exponent > -kDoubleSignificandSize) { + // We have to cut the number. + uint64_t integrals = significand >> -exponent; + uint64_t fractionals = significand - (integrals << -exponent); + if (integrals > kMaxUInt32) { + FillDigits64(integrals, buffer, length); + } else { + FillDigits32(static_cast(integrals), buffer, length); + } + *decimal_point = *length; + FillFractionals(fractionals, exponent, fractional_count, + buffer, length, decimal_point); + } else if (exponent < -128) { + // This configuration (with at most 20 digits) means that all digits must be + // 0. + ASSERT(fractional_count <= 20); + buffer[0] = '\0'; + *length = 0; + *decimal_point = -fractional_count; + } else { + *decimal_point = 0; + FillFractionals(significand, exponent, fractional_count, + buffer, length, decimal_point); + } + TrimZeros(buffer, length, decimal_point); + buffer[*length] = '\0'; + if ((*length) == 0) { + // The string is empty and the decimal_point thus has no importance. Mimick + // Gay's dtoa and and set it to -fractional_count. 
+ *decimal_point = -fractional_count; + } + return true; +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/fixed-dtoa.h b/native/iosTest/Pods/DoubleConversion/double-conversion/fixed-dtoa.h new file mode 100644 index 000000000..3bdd08e21 --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/fixed-dtoa.h @@ -0,0 +1,56 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_ +#define DOUBLE_CONVERSION_FIXED_DTOA_H_ + +#include "utils.h" + +namespace double_conversion { + +// Produces digits necessary to print a given number with +// 'fractional_count' digits after the decimal point. +// The buffer must be big enough to hold the result plus one terminating null +// character. +// +// The produced digits might be too short in which case the caller has to fill +// the gaps with '0's. +// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and +// decimal_point = -2. +// Halfway cases are rounded towards +/-Infinity (away from 0). The call +// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0. +// The returned buffer may contain digits that would be truncated from the +// shortest representation of the input. +// +// This method only works for some parameters. If it can't handle the input it +// returns false. The output is null-terminated when the function succeeds. +bool FastFixedDtoa(double v, int fractional_count, + Vector buffer, int* length, int* decimal_point); + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/ieee.h b/native/iosTest/Pods/DoubleConversion/double-conversion/ieee.h new file mode 100644 index 000000000..661141d1a --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/ieee.h @@ -0,0 +1,402 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_H_ +#define DOUBLE_CONVERSION_DOUBLE_H_ + +#include "diy-fp.h" + +namespace double_conversion { + +// We assume that doubles and uint64_t have the same endianness. +static uint64_t double_to_uint64(double d) { return BitCast(d); } +static double uint64_to_double(uint64_t d64) { return BitCast(d64); } +static uint32_t float_to_uint32(float f) { return BitCast(f); } +static float uint32_to_float(uint32_t d32) { return BitCast(d32); } + +// Helper functions for doubles. 
+class Double { + public: + static const uint64_t kSignMask = UINT64_2PART_C(0x80000000, 00000000); + static const uint64_t kExponentMask = UINT64_2PART_C(0x7FF00000, 00000000); + static const uint64_t kSignificandMask = UINT64_2PART_C(0x000FFFFF, FFFFFFFF); + static const uint64_t kHiddenBit = UINT64_2PART_C(0x00100000, 00000000); + static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit. + static const int kSignificandSize = 53; + + Double() : d64_(0) {} + explicit Double(double d) : d64_(double_to_uint64(d)) {} + explicit Double(uint64_t d64) : d64_(d64) {} + explicit Double(DiyFp diy_fp) + : d64_(DiyFpToUint64(diy_fp)) {} + + // The value encoded by this Double must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + ASSERT(Sign() > 0); + ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // The value encoded by this Double must be strictly greater than 0. + DiyFp AsNormalizedDiyFp() const { + ASSERT(value() > 0.0); + uint64_t f = Significand(); + int e = Exponent(); + + // The current double could be a denormal. + while ((f & kHiddenBit) == 0) { + f <<= 1; + e--; + } + // Do the final shifts in one go. + f <<= DiyFp::kSignificandSize - kSignificandSize; + e -= DiyFp::kSignificandSize - kSignificandSize; + return DiyFp(f, e); + } + + // Returns the double's bit as uint64. + uint64_t AsUint64() const { + return d64_; + } + + // Returns the next greater double. Returns +infinity on input +infinity. 
+ double NextDouble() const { + if (d64_ == kInfinity) return Double(kInfinity).value(); + if (Sign() < 0 && Significand() == 0) { + // -0.0 + return 0.0; + } + if (Sign() < 0) { + return Double(d64_ - 1).value(); + } else { + return Double(d64_ + 1).value(); + } + } + + double PreviousDouble() const { + if (d64_ == (kInfinity | kSignMask)) return -Double::Infinity(); + if (Sign() < 0) { + return Double(d64_ + 1).value(); + } else { + if (Significand() == 0) return -0.0; + return Double(d64_ - 1).value(); + } + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint64_t d64 = AsUint64(); + int biased_e = + static_cast((d64 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint64_t Significand() const { + uint64_t d64 = AsUint64(); + uint64_t significand = d64 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the double is a denormal. + bool IsDenormal() const { + uint64_t d64 = AsUint64(); + return (d64 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint64_t d64 = AsUint64(); + return (d64 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint64_t d64 = AsUint64(); + return ((d64 & kExponentMask) == kExponentMask) && + ((d64 & kSignificandMask) != 0); + } + + bool IsInfinite() const { + uint64_t d64 = AsUint64(); + return ((d64 & kExponentMask) == kExponentMask) && + ((d64 & kSignificandMask) == 0); + } + + int Sign() const { + uint64_t d64 = AsUint64(); + return (d64 & kSignMask) == 0? 1: -1; + } + + // Precondition: the value encoded by this Double must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + // Computes the two boundaries of this. 
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Double must be greater than 0. + void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. + // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. + bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + double value() const { return uint64_to_double(d64_); } + + // Returns the significand size for a given order of magnitude. + // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude. + // This function returns the number of significant binary digits v will have + // once it's encoded into a double. In almost all cases this is equal to + // kSignificandSize. The only exceptions are denormals. They start with + // leading zeroes and their effective significand-size is hence smaller. 
+ static int SignificandSizeForOrderOfMagnitude(int order) { + if (order >= (kDenormalExponent + kSignificandSize)) { + return kSignificandSize; + } + if (order <= kDenormalExponent) return 0; + return order - kDenormalExponent; + } + + static double Infinity() { + return Double(kInfinity).value(); + } + + static double NaN() { + return Double(kNaN).value(); + } + + private: + static const int kExponentBias = 0x3FF + kPhysicalSignificandSize; + static const int kDenormalExponent = -kExponentBias + 1; + static const int kMaxExponent = 0x7FF - kExponentBias; + static const uint64_t kInfinity = UINT64_2PART_C(0x7FF00000, 00000000); + static const uint64_t kNaN = UINT64_2PART_C(0x7FF80000, 00000000); + + const uint64_t d64_; + + static uint64_t DiyFpToUint64(DiyFp diy_fp) { + uint64_t significand = diy_fp.f(); + int exponent = diy_fp.e(); + while (significand > kHiddenBit + kSignificandMask) { + significand >>= 1; + exponent++; + } + if (exponent >= kMaxExponent) { + return kInfinity; + } + if (exponent < kDenormalExponent) { + return 0; + } + while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) { + significand <<= 1; + exponent--; + } + uint64_t biased_exponent; + if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) { + biased_exponent = 0; + } else { + biased_exponent = static_cast(exponent + kExponentBias); + } + return (significand & kSignificandMask) | + (biased_exponent << kPhysicalSignificandSize); + } + + DISALLOW_COPY_AND_ASSIGN(Double); +}; + +class Single { + public: + static const uint32_t kSignMask = 0x80000000; + static const uint32_t kExponentMask = 0x7F800000; + static const uint32_t kSignificandMask = 0x007FFFFF; + static const uint32_t kHiddenBit = 0x00800000; + static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit. 
+ static const int kSignificandSize = 24; + + Single() : d32_(0) {} + explicit Single(float f) : d32_(float_to_uint32(f)) {} + explicit Single(uint32_t d32) : d32_(d32) {} + + // The value encoded by this Single must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + ASSERT(Sign() > 0); + ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // Returns the single's bit as uint64. + uint32_t AsUint32() const { + return d32_; + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint32_t d32 = AsUint32(); + int biased_e = + static_cast((d32 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint32_t Significand() const { + uint32_t d32 = AsUint32(); + uint32_t significand = d32 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the single is a denormal. + bool IsDenormal() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) != 0); + } + + bool IsInfinite() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) == 0); + } + + int Sign() const { + uint32_t d32 = AsUint32(); + return (d32 & kSignMask) == 0? 1: -1; + } + + // Computes the two boundaries of this. + // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Single must be greater than 0. 
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + // Precondition: the value encoded by this Single must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. + // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. 
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + float value() const { return uint32_to_float(d32_); } + + static float Infinity() { + return Single(kInfinity).value(); + } + + static float NaN() { + return Single(kNaN).value(); + } + + private: + static const int kExponentBias = 0x7F + kPhysicalSignificandSize; + static const int kDenormalExponent = -kExponentBias + 1; + static const int kMaxExponent = 0xFF - kExponentBias; + static const uint32_t kInfinity = 0x7F800000; + static const uint32_t kNaN = 0x7FC00000; + + const uint32_t d32_; + + DISALLOW_COPY_AND_ASSIGN(Single); +}; + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_DOUBLE_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/strtod.cc b/native/iosTest/Pods/DoubleConversion/double-conversion/strtod.cc new file mode 100644 index 000000000..17abcbb2a --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/strtod.cc @@ -0,0 +1,555 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include +#include + +#include "strtod.h" +#include "bignum.h" +#include "cached-powers.h" +#include "ieee.h" + +namespace double_conversion { + +// 2^53 = 9007199254740992. +// Any integer with at most 15 decimal digits will hence fit into a double +// (which has a 53bit significand) without loss of precision. +static const int kMaxExactDoubleIntegerDecimalDigits = 15; +// 2^64 = 18446744073709551616 > 10^19 +static const int kMaxUint64DecimalDigits = 19; + +// Max double: 1.7976931348623157 x 10^308 +// Min non-zero double: 4.9406564584124654 x 10^-324 +// Any x >= 10^309 is interpreted as +infinity. +// Any x <= 10^-324 is interpreted as 0. +// Note that 2.5e-324 (despite being smaller than the min double) will be read +// as non-zero (equal to the min non-zero double). 
+static const int kMaxDecimalPower = 309;
+static const int kMinDecimalPower = -324;
+
+// 2^64 = 18446744073709551616
+static const uint64_t kMaxUint64 = UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF);
+
+
+static const double exact_powers_of_ten[] = {
+  1.0,  // 10^0
+  10.0,
+  100.0,
+  1000.0,
+  10000.0,
+  100000.0,
+  1000000.0,
+  10000000.0,
+  100000000.0,
+  1000000000.0,
+  10000000000.0,  // 10^10
+  100000000000.0,
+  1000000000000.0,
+  10000000000000.0,
+  100000000000000.0,
+  1000000000000000.0,
+  10000000000000000.0,
+  100000000000000000.0,
+  1000000000000000000.0,
+  10000000000000000000.0,
+  100000000000000000000.0,  // 10^20
+  1000000000000000000000.0,
+  // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
+  10000000000000000000000.0
+};
+static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
+
+// Maximum number of significant digits in the decimal representation.
+// In fact the value is 772 (see conversions.cc), but to give us some margin
+// we round up to 780.
+static const int kMaxSignificantDecimalDigits = 780;
+
+static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
+  for (int i = 0; i < buffer.length(); i++) {
+    if (buffer[i] != '0') {
+      return buffer.SubVector(i, buffer.length());
+    }
+  }
+  return Vector<const char>(buffer.start(), 0);
+}
+
+
+static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
+  for (int i = buffer.length() - 1; i >= 0; --i) {
+    if (buffer[i] != '0') {
+      return buffer.SubVector(0, i + 1);
+    }
+  }
+  return Vector<const char>(buffer.start(), 0);
+}
+
+
+static void CutToMaxSignificantDigits(Vector<const char> buffer,
+                                      int exponent,
+                                      char* significant_buffer,
+                                      int* significant_exponent) {
+  for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
+    significant_buffer[i] = buffer[i];
+  }
+  // The input buffer has been trimmed. Therefore the last digit must be
+  // different from '0'.
+  ASSERT(buffer[buffer.length() - 1] != '0');
+  // Set the last digit to be non-zero. This is sufficient to guarantee
+  // correct rounding.
+  significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
+  *significant_exponent =
+      exponent + (buffer.length() - kMaxSignificantDecimalDigits);
+}
+
+
+// Trims the buffer and cuts it to at most kMaxSignificantDecimalDigits.
+// If possible the input-buffer is reused, but if the buffer needs to be
+// modified (due to cutting), then the input needs to be copied into the
+// buffer_copy_space.
+static void TrimAndCut(Vector<const char> buffer, int exponent,
+                       char* buffer_copy_space, int space_size,
+                       Vector<const char>* trimmed, int* updated_exponent) {
+  Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+  Vector<const char> right_trimmed = TrimTrailingZeros(left_trimmed);
+  exponent += left_trimmed.length() - right_trimmed.length();
+  if (right_trimmed.length() > kMaxSignificantDecimalDigits) {
+    (void) space_size;  // Mark variable as used.
+    ASSERT(space_size >= kMaxSignificantDecimalDigits);
+    CutToMaxSignificantDigits(right_trimmed, exponent,
+                              buffer_copy_space, updated_exponent);
+    *trimmed = Vector<const char>(buffer_copy_space,
+                                  kMaxSignificantDecimalDigits);
+  } else {
+    *trimmed = right_trimmed;
+    *updated_exponent = exponent;
+  }
+}
+
+
+// Reads digits from the buffer and converts them to a uint64.
+// Reads in as many digits as fit into a uint64.
+// When the string starts with "1844674407370955161" no further digit is read.
+// Since 2^64 = 18446744073709551616 it would still be possible read another
+// digit if it was less or equal than 6, but this would complicate the code.
+static uint64_t ReadUint64(Vector<const char> buffer,
+                           int* number_of_read_digits) {
+  uint64_t result = 0;
+  int i = 0;
+  while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
+    int digit = buffer[i++] - '0';
+    ASSERT(0 <= digit && digit <= 9);
+    result = 10 * result + digit;
+  }
+  *number_of_read_digits = i;
+  return result;
+}
+
+
+// Reads a DiyFp from the buffer.
+// The returned DiyFp is not necessarily normalized.
+// If remaining_decimals is zero then the returned DiyFp is accurate.
+// Otherwise it has been rounded and has error of at most 1/2 ulp. +static void ReadDiyFp(Vector buffer, + DiyFp* result, + int* remaining_decimals) { + int read_digits; + uint64_t significand = ReadUint64(buffer, &read_digits); + if (buffer.length() == read_digits) { + *result = DiyFp(significand, 0); + *remaining_decimals = 0; + } else { + // Round the significand. + if (buffer[read_digits] >= '5') { + significand++; + } + // Compute the binary exponent. + int exponent = 0; + *result = DiyFp(significand, exponent); + *remaining_decimals = buffer.length() - read_digits; + } +} + + +static bool DoubleStrtod(Vector trimmed, + int exponent, + double* result) { +#if !defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS) + // On x86 the floating-point stack can be 64 or 80 bits wide. If it is + // 80 bits wide (as is the case on Linux) then double-rounding occurs and the + // result is not accurate. + // We know that Windows32 uses 64 bits and is therefore accurate. + // Note that the ARM simulator is compiled for 32bits. It therefore exhibits + // the same problem. + return false; +#endif + if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) { + int read_digits; + // The trimmed input fits into a double. + // If the 10^exponent (resp. 10^-exponent) fits into a double too then we + // can compute the result-double simply by multiplying (resp. dividing) the + // two numbers. + // This is possible because IEEE guarantees that floating-point operations + // return the best possible approximation. + if (exponent < 0 && -exponent < kExactPowersOfTenSize) { + // 10^-exponent fits into a double. + *result = static_cast(ReadUint64(trimmed, &read_digits)); + ASSERT(read_digits == trimmed.length()); + *result /= exact_powers_of_ten[-exponent]; + return true; + } + if (0 <= exponent && exponent < kExactPowersOfTenSize) { + // 10^exponent fits into a double. 
+ *result = static_cast(ReadUint64(trimmed, &read_digits)); + ASSERT(read_digits == trimmed.length()); + *result *= exact_powers_of_ten[exponent]; + return true; + } + int remaining_digits = + kMaxExactDoubleIntegerDecimalDigits - trimmed.length(); + if ((0 <= exponent) && + (exponent - remaining_digits < kExactPowersOfTenSize)) { + // The trimmed string was short and we can multiply it with + // 10^remaining_digits. As a result the remaining exponent now fits + // into a double too. + *result = static_cast(ReadUint64(trimmed, &read_digits)); + ASSERT(read_digits == trimmed.length()); + *result *= exact_powers_of_ten[remaining_digits]; + *result *= exact_powers_of_ten[exponent - remaining_digits]; + return true; + } + } + return false; +} + + +// Returns 10^exponent as an exact DiyFp. +// The given exponent must be in the range [1; kDecimalExponentDistance[. +static DiyFp AdjustmentPowerOfTen(int exponent) { + ASSERT(0 < exponent); + ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance); + // Simply hardcode the remaining powers for the given decimal exponent + // distance. + ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8); + switch (exponent) { + case 1: return DiyFp(UINT64_2PART_C(0xa0000000, 00000000), -60); + case 2: return DiyFp(UINT64_2PART_C(0xc8000000, 00000000), -57); + case 3: return DiyFp(UINT64_2PART_C(0xfa000000, 00000000), -54); + case 4: return DiyFp(UINT64_2PART_C(0x9c400000, 00000000), -50); + case 5: return DiyFp(UINT64_2PART_C(0xc3500000, 00000000), -47); + case 6: return DiyFp(UINT64_2PART_C(0xf4240000, 00000000), -44); + case 7: return DiyFp(UINT64_2PART_C(0x98968000, 00000000), -40); + default: + UNREACHABLE(); + } +} + + +// If the function returns true then the result is the correct double. +// Otherwise it is either the correct double or the double that is just below +// the correct double. 
+static bool DiyFpStrtod(Vector<const char> buffer,
+                        int exponent,
+                        double* result) {
+  DiyFp input;
+  int remaining_decimals;
+  ReadDiyFp(buffer, &input, &remaining_decimals);
+  // Since we may have dropped some digits the input is not accurate.
+  // If remaining_decimals is different than 0 then the error is at most
+  // .5 ulp (unit in the last place).
+  // We don't want to deal with fractions and therefore keep a common
+  // denominator.
+  const int kDenominatorLog = 3;
+  const int kDenominator = 1 << kDenominatorLog;
+  // Move the remaining decimals into the exponent.
+  exponent += remaining_decimals;
+  uint64_t error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
+
+  int old_e = input.e();
+  input.Normalize();
+  error <<= old_e - input.e();
+
+  ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
+  if (exponent < PowersOfTenCache::kMinDecimalExponent) {
+    *result = 0.0;
+    return true;
+  }
+  DiyFp cached_power;
+  int cached_decimal_exponent;
+  PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
+                                                     &cached_power,
+                                                     &cached_decimal_exponent);
+
+  if (cached_decimal_exponent != exponent) {
+    int adjustment_exponent = exponent - cached_decimal_exponent;
+    DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
+    input.Multiply(adjustment_power);
+    if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
+      // The product of input with the adjustment power fits into a 64 bit
+      // integer.
+      ASSERT(DiyFp::kSignificandSize == 64);
+    } else {
+      // The adjustment power is exact. There is hence only an error of 0.5.
+      error += kDenominator / 2;
+    }
+  }
+
+  input.Multiply(cached_power);
+  // The error introduced by a multiplication of a*b equals
+  //   error_a + error_b + error_a*error_b/2^64 + 0.5
+  // Substituting a with 'input' and b with 'cached_power' we have
+  //   error_b = 0.5  (all cached powers have an error of less than 0.5 ulp),
+  //   error_ab = 0 or 1 / kDenominator > error_a*error_b/ 2^64
+  int error_b = kDenominator / 2;
+  int error_ab = (error == 0 ? 0 : 1);  // We round up to 1.
+  int fixed_error = kDenominator / 2;
+  error += error_b + error_ab + fixed_error;
+
+  old_e = input.e();
+  input.Normalize();
+  error <<= old_e - input.e();
+
+  // See if the double's significand changes if we add/subtract the error.
+  int order_of_magnitude = DiyFp::kSignificandSize + input.e();
+  int effective_significand_size =
+      Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
+  int precision_digits_count =
+      DiyFp::kSignificandSize - effective_significand_size;
+  if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
+    // This can only happen for very small denormals. In this case the
+    // half-way multiplied by the denominator exceeds the range of an uint64.
+    // Simply shift everything to the right.
+    int shift_amount = (precision_digits_count + kDenominatorLog) -
+        DiyFp::kSignificandSize + 1;
+    input.set_f(input.f() >> shift_amount);
+    input.set_e(input.e() + shift_amount);
+    // We add 1 for the lost precision of error, and kDenominator for
+    // the lost precision of input.f().
+    error = (error >> shift_amount) + 1 + kDenominator;
+    precision_digits_count -= shift_amount;
+  }
+  // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
+  ASSERT(DiyFp::kSignificandSize == 64);
+  ASSERT(precision_digits_count < 64);
+  uint64_t one64 = 1;
+  uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
+  uint64_t precision_bits = input.f() & precision_bits_mask;
+  uint64_t half_way = one64 << (precision_digits_count - 1);
+  precision_bits *= kDenominator;
+  half_way *= kDenominator;
+  DiyFp rounded_input(input.f() >> precision_digits_count,
+                      input.e() + precision_digits_count);
+  if (precision_bits >= half_way + error) {
+    rounded_input.set_f(rounded_input.f() + 1);
+  }
+  // If the last_bits are too close to the half-way case then we are too
+  // inaccurate and round down. In this case we return false so that we can
+  // fall back to a more precise algorithm.
+
+  *result = Double(rounded_input).value();
+  if (half_way - error < precision_bits && precision_bits < half_way + error) {
+    // Too imprecise. The caller will have to fall back to a slower version.
+    // However the returned number is guaranteed to be either the correct
+    // double, or the next-lower double.
+    return false;
+  } else {
+    return true;
+  }
+}
+
+
+// Returns
+//   - -1 if buffer*10^exponent < diy_fp.
+//   -  0 if buffer*10^exponent == diy_fp.
+//   - +1 if buffer*10^exponent > diy_fp.
+// Preconditions:
+//   buffer.length() + exponent <= kMaxDecimalPower + 1
+//   buffer.length() + exponent > kMinDecimalPower
+//   buffer.length() <= kMaxDecimalSignificantDigits
+static int CompareBufferWithDiyFp(Vector<const char> buffer,
+                                  int exponent,
+                                  DiyFp diy_fp) {
+  ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
+  ASSERT(buffer.length() + exponent > kMinDecimalPower);
+  ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
+  // Make sure that the Bignum will be able to hold all our numbers.
+  // Our Bignum implementation has a separate field for exponents. Shifts will
+  // consume at most one bigit (< 64 bits).
+  // ln(10) == 3.3219...
+ ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits); + Bignum buffer_bignum; + Bignum diy_fp_bignum; + buffer_bignum.AssignDecimalString(buffer); + diy_fp_bignum.AssignUInt64(diy_fp.f()); + if (exponent >= 0) { + buffer_bignum.MultiplyByPowerOfTen(exponent); + } else { + diy_fp_bignum.MultiplyByPowerOfTen(-exponent); + } + if (diy_fp.e() > 0) { + diy_fp_bignum.ShiftLeft(diy_fp.e()); + } else { + buffer_bignum.ShiftLeft(-diy_fp.e()); + } + return Bignum::Compare(buffer_bignum, diy_fp_bignum); +} + + +// Returns true if the guess is the correct double. +// Returns false, when guess is either correct or the next-lower double. +static bool ComputeGuess(Vector trimmed, int exponent, + double* guess) { + if (trimmed.length() == 0) { + *guess = 0.0; + return true; + } + if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) { + *guess = Double::Infinity(); + return true; + } + if (exponent + trimmed.length() <= kMinDecimalPower) { + *guess = 0.0; + return true; + } + + if (DoubleStrtod(trimmed, exponent, guess) || + DiyFpStrtod(trimmed, exponent, guess)) { + return true; + } + if (*guess == Double::Infinity()) { + return true; + } + return false; +} + +double Strtod(Vector buffer, int exponent) { + char copy_buffer[kMaxSignificantDecimalDigits]; + Vector trimmed; + int updated_exponent; + TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits, + &trimmed, &updated_exponent); + exponent = updated_exponent; + + double guess; + bool is_correct = ComputeGuess(trimmed, exponent, &guess); + if (is_correct) return guess; + + DiyFp upper_boundary = Double(guess).UpperBoundary(); + int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary); + if (comparison < 0) { + return guess; + } else if (comparison > 0) { + return Double(guess).NextDouble(); + } else if ((Double(guess).Significand() & 1) == 0) { + // Round towards even. 
+ return guess; + } else { + return Double(guess).NextDouble(); + } +} + +float Strtof(Vector buffer, int exponent) { + char copy_buffer[kMaxSignificantDecimalDigits]; + Vector trimmed; + int updated_exponent; + TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits, + &trimmed, &updated_exponent); + exponent = updated_exponent; + + double double_guess; + bool is_correct = ComputeGuess(trimmed, exponent, &double_guess); + + float float_guess = static_cast(double_guess); + if (float_guess == double_guess) { + // This shortcut triggers for integer values. + return float_guess; + } + + // We must catch double-rounding. Say the double has been rounded up, and is + // now a boundary of a float, and rounds up again. This is why we have to + // look at previous too. + // Example (in decimal numbers): + // input: 12349 + // high-precision (4 digits): 1235 + // low-precision (3 digits): + // when read from input: 123 + // when rounded from high precision: 124. + // To do this we simply look at the neigbors of the correct result and see + // if they would round to the same float. If the guess is not correct we have + // to look at four values (since two different doubles could be the correct + // double). + + double double_next = Double(double_guess).NextDouble(); + double double_previous = Double(double_guess).PreviousDouble(); + + float f1 = static_cast(double_previous); + float f2 = float_guess; + float f3 = static_cast(double_next); + float f4; + if (is_correct) { + f4 = f3; + } else { + double double_next2 = Double(double_next).NextDouble(); + f4 = static_cast(double_next2); + } + (void) f2; // Mark variable as used. + ASSERT(f1 <= f2 && f2 <= f3 && f3 <= f4); + + // If the guess doesn't lie near a single-precision boundary we can simply + // return its float-value. 
+ if (f1 == f4) { + return float_guess; + } + + ASSERT((f1 != f2 && f2 == f3 && f3 == f4) || + (f1 == f2 && f2 != f3 && f3 == f4) || + (f1 == f2 && f2 == f3 && f3 != f4)); + + // guess and next are the two possible canditates (in the same way that + // double_guess was the lower candidate for a double-precision guess). + float guess = f1; + float next = f4; + DiyFp upper_boundary; + if (guess == 0.0f) { + float min_float = 1e-45f; + upper_boundary = Double(static_cast(min_float) / 2).AsDiyFp(); + } else { + upper_boundary = Single(guess).UpperBoundary(); + } + int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary); + if (comparison < 0) { + return guess; + } else if (comparison > 0) { + return next; + } else if ((Single(guess).Significand() & 1) == 0) { + // Round towards even. + return guess; + } else { + return next; + } +} + +} // namespace double_conversion diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/strtod.h b/native/iosTest/Pods/DoubleConversion/double-conversion/strtod.h new file mode 100644 index 000000000..ed0293b8f --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/strtod.h @@ -0,0 +1,45 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_STRTOD_H_ +#define DOUBLE_CONVERSION_STRTOD_H_ + +#include "utils.h" + +namespace double_conversion { + +// The buffer must only contain digits in the range [0-9]. It must not +// contain a dot or a sign. It must not start with '0', and must not be empty. +double Strtod(Vector buffer, int exponent); + +// The buffer must only contain digits in the range [0-9]. It must not +// contain a dot or a sign. It must not start with '0', and must not be empty. +float Strtof(Vector buffer, int exponent); + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_STRTOD_H_ diff --git a/native/iosTest/Pods/DoubleConversion/double-conversion/utils.h b/native/iosTest/Pods/DoubleConversion/double-conversion/utils.h new file mode 100644 index 000000000..a7c9b429a --- /dev/null +++ b/native/iosTest/Pods/DoubleConversion/double-conversion/utils.h @@ -0,0 +1,324 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_UTILS_H_ +#define DOUBLE_CONVERSION_UTILS_H_ + +#include +#include + +#include +#ifndef ASSERT +#define ASSERT(condition) \ + assert(condition); +#endif +#ifndef UNIMPLEMENTED +#define UNIMPLEMENTED() (abort()) +#endif +#ifndef UNREACHABLE +#define UNREACHABLE() (abort()) +#endif + +// Double operations detection based on target architecture. +// Linux uses a 80bit wide floating point stack on x86. 
This induces double +// rounding, which in turn leads to wrong results. +// An easy way to test if the floating-point operations are correct is to +// evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then +// the result is equal to 89255e-22. +// The best way to test this, is to create a division-function and to compare +// the output of the division with the expected result. (Inlining must be +// disabled.) +// On Linux,x86 89255e-22 != Div_double(89255.0/1e22) +#if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || \ + defined(__hppa__) || defined(__ia64__) || \ + defined(__mips__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ + defined(__SH4__) || defined(__alpha__) || \ + defined(_MIPS_ARCH_MIPS32R2) || \ + defined(__AARCH64EL__) +#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 +#elif defined(__mc68000__) +#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#elif defined(_M_IX86) || defined(__i386__) || defined(__i386) +#if defined(_WIN32) +// Windows uses a 64bit wide floating point stack. +#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 +#else +#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#endif // _WIN32 +#else +#error Target architecture was not detected as supported by Double-Conversion. +#endif + +#if defined(__GNUC__) +#define DOUBLE_CONVERSION_UNUSED __attribute__((unused)) +#else +#define DOUBLE_CONVERSION_UNUSED +#endif + +#if defined(_WIN32) && !defined(__MINGW32__) + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; // NOLINT +typedef unsigned short uint16_t; // NOLINT +typedef int int32_t; +typedef unsigned int uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +// intptr_t and friends are defined in crtdefs.h through stdio.h. + +#else + +#include + +#endif + +// The following macro works on both 32 and 64-bit platforms. 
+// Usage: instead of writing 0x1234567890123456 +// write UINT64_2PART_C(0x12345678,90123456); +#define UINT64_2PART_C(a, b) (((static_cast(a) << 32) + 0x##b##u)) + + +// The expression ARRAY_SIZE(a) is a compile-time constant of type +// size_t which represents the number of elements of the given +// array. You should only use ARRAY_SIZE on statically allocated +// arrays. +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) \ + ((sizeof(a) / sizeof(*(a))) / \ + static_cast(!(sizeof(a) % sizeof(*(a))))) +#endif + +// A macro to disallow the evil copy constructor and operator= functions +// This should be used in the private: declarations for a class +#ifndef DISALLOW_COPY_AND_ASSIGN +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) +#endif + +// A macro to disallow all the implicit constructors, namely the +// default constructor, copy constructor and operator= functions. +// +// This should be used in the private: declarations for a class +// that wants to prevent anyone from instantiating it. This is +// especially useful for classes containing only static methods. +#ifndef DISALLOW_IMPLICIT_CONSTRUCTORS +#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName(); \ + DISALLOW_COPY_AND_ASSIGN(TypeName) +#endif + +namespace double_conversion { + +static const int kCharSize = sizeof(char); + +// Returns the maximum of the two parameters. +template +static T Max(T a, T b) { + return a < b ? b : a; +} + + +// Returns the minimum of the two parameters. +template +static T Min(T a, T b) { + return a < b ? a : b; +} + + +inline int StrLength(const char* string) { + size_t length = strlen(string); + ASSERT(length == static_cast(static_cast(length))); + return static_cast(length); +} + +// This is a simplified version of V8's Vector class. 
+template <typename T>
+class Vector {
+ public:
+  Vector() : start_(NULL), length_(0) {}
+  Vector(T* data, int length) : start_(data), length_(length) {
+    ASSERT(length == 0 || (length > 0 && data != NULL));
+  }
+
+  // Returns a vector using the same backing storage as this one,
+  // spanning from and including 'from', to but not including 'to'.
+  Vector<T> SubVector(int from, int to) {
+    ASSERT(to <= length_);
+    ASSERT(from < to);
+    ASSERT(0 <= from);
+    return Vector<T>(start() + from, to - from);
+  }
+
+  // Returns the length of the vector.
+  int length() const { return length_; }
+
+  // Returns whether or not the vector is empty.
+  bool is_empty() const { return length_ == 0; }
+
+  // Returns the pointer to the start of the data in the vector.
+  T* start() const { return start_; }
+
+  // Access individual vector elements - checks bounds in debug mode.
+  T& operator[](int index) const {
+    ASSERT(0 <= index && index < length_);
+    return start_[index];
+  }
+
+  T& first() { return start_[0]; }
+
+  T& last() { return start_[length_ - 1]; }
+
+ private:
+  T* start_;
+  int length_;
+};
+
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that checks the
+// buffer bounds on all operations in debug mode.
+class StringBuilder {
+ public:
+  StringBuilder(char* buffer, int size)
+      : buffer_(buffer, size), position_(0) { }
+
+  ~StringBuilder() { if (!is_finalized()) Finalize(); }
+
+  int size() const { return buffer_.length(); }
+
+  // Get the current position in the builder.
+  int position() const {
+    ASSERT(!is_finalized());
+    return position_;
+  }
+
+  // Reset the position.
+  void Reset() { position_ = 0; }
+
+  // Add a single character to the builder. It is not allowed to add
+  // 0-characters; use the Finalize() method to terminate the string
+  // instead.
+ void AddCharacter(char c) { + ASSERT(c != '\0'); + ASSERT(!is_finalized() && position_ < buffer_.length()); + buffer_[position_++] = c; + } + + // Add an entire string to the builder. Uses strlen() internally to + // compute the length of the input string. + void AddString(const char* s) { + AddSubstring(s, StrLength(s)); + } + + // Add the first 'n' characters of the given string 's' to the + // builder. The input string must have enough characters. + void AddSubstring(const char* s, int n) { + ASSERT(!is_finalized() && position_ + n < buffer_.length()); + ASSERT(static_cast(n) <= strlen(s)); + memmove(&buffer_[position_], s, n * kCharSize); + position_ += n; + } + + + // Add character padding to the builder. If count is non-positive, + // nothing is added to the builder. + void AddPadding(char c, int count) { + for (int i = 0; i < count; i++) { + AddCharacter(c); + } + } + + // Finalize the string by 0-terminating it and returning the buffer. + char* Finalize() { + ASSERT(!is_finalized() && position_ < buffer_.length()); + buffer_[position_] = '\0'; + // Make sure nobody managed to add a 0-character to the + // buffer while building the string. + ASSERT(strlen(buffer_.start()) == static_cast(position_)); + position_ = -1; + ASSERT(is_finalized()); + return buffer_.start(); + } + + private: + Vector buffer_; + int position_; + + bool is_finalized() const { return position_ < 0; } + + DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); +}; + +// The type-based aliasing rule allows the compiler to assume that pointers of +// different types (for some definition of different) never alias each other. +// Thus the following code does not work: +// +// float f = foo(); +// int fbits = *(int*)(&f); +// +// The compiler 'knows' that the int pointer can't refer to f since the types +// don't match, so the compiler may cache f in a register, leaving random data +// in fbits. 
Using C++ style casts makes no difference, however a pointer to +// char data is assumed to alias any other pointer. This is the 'memcpy +// exception'. +// +// Bit_cast uses the memcpy exception to move the bits from a variable of one +// type of a variable of another type. Of course the end result is likely to +// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005) +// will completely optimize BitCast away. +// +// There is an additional use for BitCast. +// Recent gccs will warn when they see casts that may result in breakage due to +// the type-based aliasing rule. If you have checked that there is no breakage +// you can use BitCast to cast one pointer type to another. This confuses gcc +// enough that it can no longer see that you have cast one pointer type to +// another thus avoiding the warning. +template +inline Dest BitCast(const Source& source) { + // Compile time assertion: sizeof(Dest) == sizeof(Source) + // A compile error here means your Dest and Source have different sizes. + DOUBLE_CONVERSION_UNUSED + typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1]; + + Dest dest; + memmove(&dest, &source, sizeof(dest)); + return dest; +} + +template +inline Dest BitCast(Source* source) { + return BitCast(reinterpret_cast(source)); +} + +} // namespace double_conversion + +#endif // DOUBLE_CONVERSION_UTILS_H_ diff --git a/native/iosTest/Pods/Folly/LICENSE b/native/iosTest/Pods/Folly/LICENSE new file mode 100644 index 000000000..48bdb1282 --- /dev/null +++ b/native/iosTest/Pods/Folly/LICENSE @@ -0,0 +1,200 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + +Files in folly/external/farmhash licensed as follows + + Copyright (c) 2014 Google, Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/native/iosTest/Pods/Folly/README.md b/native/iosTest/Pods/Folly/README.md new file mode 100644 index 000000000..f50d687b9 --- /dev/null +++ b/native/iosTest/Pods/Folly/README.md @@ -0,0 +1,267 @@ +Folly: Facebook Open-source Library +----------------------------------- + +[![Build Status](https://travis-ci.org/facebook/folly.svg?branch=master)](https://travis-ci.org/facebook/folly) + +### What is `folly`? + +Folly (acronymed loosely after Facebook Open Source Library) is a +library of C++14 components designed with practicality and efficiency +in mind. **Folly contains a variety of core library components used extensively +at Facebook**. In particular, it's often a dependency of Facebook's other +open source C++ efforts and place where those projects can share code. 
+ +It complements (as opposed to competing against) offerings +such as Boost and of course `std`. In fact, we embark on defining our +own component only when something we need is either not available, or +does not meet the needed performance profile. We endeavor to remove +things from folly if or when `std` or Boost obsoletes them. + +Performance concerns permeate much of Folly, sometimes leading to +designs that are more idiosyncratic than they would otherwise be (see +e.g. `PackedSyncPtr.h`, `SmallLocks.h`). Good performance at large +scale is a unifying theme in all of Folly. + +### Logical Design + +Folly is a collection of relatively independent components, some as +simple as a few symbols. There is no restriction on internal +dependencies, meaning that a given folly module may use any other +folly components. + +All symbols are defined in the top-level namespace `folly`, except of +course macros. Macro names are ALL_UPPERCASE and should be prefixed +with `FOLLY_`. Namespace `folly` defines other internal namespaces +such as `internal` or `detail`. User code should not depend on symbols +in those namespaces. + +Folly has an `experimental` directory as well. This designation connotes +primarily that we feel the API may change heavily over time. This code, +typically, is still in heavy use and is well tested. + +### Physical Design + +At the top level Folly uses the classic "stuttering" scheme +`folly/folly` used by Boost and others. The first directory serves as +an installation root of the library (with possible versioning a la +`folly-1.0/`), and the second is to distinguish the library when +including files, e.g. `#include `. + +The directory structure is flat (mimicking the namespace structure), +i.e. we don't have an elaborate directory hierarchy (it is possible +this will change in future versions). The subdirectory `experimental` +contains files that are used inside folly and possibly at Facebook but +not considered stable enough for client use. 
Your code should not use +files in `folly/experimental` lest it may break when you update Folly. + +The `folly/folly/test` subdirectory includes the unittests for all +components, usually named `ComponentXyzTest.cpp` for each +`ComponentXyz.*`. The `folly/folly/docs` directory contains +documentation. + +### What's in it? + +Because of folly's fairly flat structure, the best way to see what's in it +is to look at the headers in [top level `folly/` directory](https://github.com/facebook/folly/tree/master/folly). You can also +check the [`docs` folder](folly/docs) for documentation, starting with the +[overview](folly/docs/Overview.md). + +Folly is published on GitHub at https://github.com/facebook/folly + +### Build Notes + +#### Dependencies + +folly requires gcc 5.1+ and a version of boost compiled with C++14 support. + +googletest is required to build and run folly's tests. You can download +it from https://github.com/google/googletest/archive/release-1.8.0.tar.gz +The following commands can be used to download and install it: + +``` +wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz && \ +tar zxf release-1.8.0.tar.gz && \ +rm -f release-1.8.0.tar.gz && \ +cd googletest-release-1.8.0 && \ +cmake . && \ +make && \ +make install +``` + +#### Finding dependencies in non-default locations + +If you have boost, gtest, or other dependencies installed in a non-default +location, you can use the `CMAKE_INCLUDE_PATH` and `CMAKE_LIBRARY_PATH` +variables to make CMAKE look also look for header files and libraries in +non-standard locations. For example, to also search the directories +`/alt/include/path1` and `/alt/include/path2` for header files and the +directories `/alt/lib/path1` and `/alt/lib/path2` for libraries, you can invoke +`cmake` as follows: + +``` +cmake \ + -DCMAKE_INCLUDE_PATH=/alt/include/path1:/alt/include/path2 \ + -DCMAKE_LIBRARY_PATH=/alt/lib/path1:/alt/lib/path2 ... 
+``` + +#### Building tests + +By default, building the tests is disabled as part of the CMake `all` target. +To build the tests, specify `-DBUILD_TESTS=ON` to CMake at configure time. + +#### Ubuntu 16.04 LTS + +The following packages are required (feel free to cut and paste the apt-get +command below): + +``` +sudo apt-get install \ + g++ \ + cmake \ + libboost-all-dev \ + libevent-dev \ + libdouble-conversion-dev \ + libgoogle-glog-dev \ + libgflags-dev \ + libiberty-dev \ + liblz4-dev \ + liblzma-dev \ + libsnappy-dev \ + make \ + zlib1g-dev \ + binutils-dev \ + libjemalloc-dev \ + libssl-dev \ + pkg-config \ + libunwind-dev +``` + +Folly relies on [fmt](https://github.com/fmtlib/fmt) which needs to be installed from source. +The following commands will download, compile, and install fmt. + +``` +git clone https://github.com/fmtlib/fmt.git && cd fmt + +mkdir _build && cd _build +cmake .. + +make -j$(nproc) +sudo make install +``` + +If advanced debugging functionality is required, use: + +``` +sudo apt-get install \ + libunwind8-dev \ + libelf-dev \ + libdwarf-dev +``` + +In the folly directory (e.g. the checkout root or the archive unpack root), run: +``` + mkdir _build && cd _build + cmake .. + make -j $(nproc) + make install # with either sudo or DESTDIR as necessary +``` + +#### OS X (Homebrew) + +folly is available as a Formula and releases may be built via `brew install folly`. + +You may also use `folly/build/bootstrap-osx-homebrew.sh` to build against `master`: + +``` + ./folly/build/bootstrap-osx-homebrew.sh +``` + +This will create a build directory `_build` in the top-level. 
+ +#### OS X (MacPorts) + +Install the required packages from MacPorts: + +``` + sudo port install \ + boost \ + cmake \ + gflags \ + git \ + google-glog \ + libevent \ + libtool \ + lz4 \ + lzma \ + openssl \ + snappy \ + xz \ + zlib +``` + +Download and install double-conversion: + +``` + git clone https://github.com/google/double-conversion.git + cd double-conversion + cmake -DBUILD_SHARED_LIBS=ON . + make + sudo make install +``` + +Download and install folly with the parameters listed below: + +``` + git clone https://github.com/facebook/folly.git + cd folly + mkdir _build + cd _build + cmake .. + make + sudo make install +``` + +#### Windows (Vcpkg) + +folly is available in [Vcpkg](https://github.com/Microsoft/vcpkg#vcpkg) and releases may be built via `vcpkg install folly:x64-windows`. + +You may also use `vcpkg install folly:x64-windows --head` to build against `master`. + +#### Other Linux distributions + +- double-conversion (https://github.com/google/double-conversion) + + Download and build double-conversion. + You may need to tell cmake where to find it. 
+ + [double-conversion/] `ln -s src double-conversion` + + [folly/] `mkdir build && cd build` + [folly/build/] `cmake "-DCMAKE_INCLUDE_PATH=$DOUBLE_CONVERSION_HOME/include" "-DCMAKE_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/lib" ..` + + [folly/build/] `make` + +- additional platform specific dependencies: + + Fedora >= 21 64-bit (last tested on Fedora 28 64-bit) + - gcc + - gcc-c++ + - cmake + - automake + - boost-devel + - libtool + - lz4-devel + - lzma-devel + - snappy-devel + - zlib-devel + - glog-devel + - gflags-devel + - scons + - double-conversion-devel + - openssl-devel + - libevent-devel + + Optional + - libdwarf-dev + - libelf-dev + - libunwind8-dev diff --git a/native/iosTest/Pods/Folly/folly/AtomicHashArray-inl.h b/native/iosTest/Pods/Folly/folly/AtomicHashArray-inl.h new file mode 100644 index 000000000..d05801160 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicHashArray-inl.h @@ -0,0 +1,550 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef FOLLY_ATOMICHASHARRAY_H_ +#error "This should only be included by AtomicHashArray.h" +#endif + +#include + +#include +#include +#include +#include + +namespace folly { + +// AtomicHashArray private constructor -- +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>:: + AtomicHashArray( + size_t capacity, + KeyT emptyKey, + KeyT lockedKey, + KeyT erasedKey, + double _maxLoadFactor, + uint32_t cacheSize) + : capacity_(capacity), + maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)), + kEmptyKey_(emptyKey), + kLockedKey_(lockedKey), + kErasedKey_(erasedKey), + kAnchorMask_(nextPowTwo(capacity_) - 1), + numEntries_(0, cacheSize), + numPendingEntries_(0, cacheSize), + isFull_(0), + numErases_(0) { + if (capacity == 0) { + throw_exception("capacity"); + } +} + +/* + * findInternal -- + * + * Sets ret.second to value found and ret.index to index + * of key and returns true, or if key does not exist returns false and + * ret.index is set to capacity_. 
+ */ +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +template +typename AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::SimpleRetT +AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::findInternal(const LookupKeyT key_in) { + checkLegalKeyIfKey(key_in); + + for (size_t idx = keyToAnchorIdx(key_in), + numProbes = 0; + ; + idx = ProbeFcn()(idx, numProbes, capacity_)) { + const KeyT key = acquireLoadKey(cells_[idx]); + if (LIKELY(LookupEqualFcn()(key, key_in))) { + return SimpleRetT(idx, true); + } + if (UNLIKELY(key == kEmptyKey_)) { + // if we hit an empty element, this key does not exist + return SimpleRetT(capacity_, false); + } + // NOTE: the way we count numProbes must be same in find(), insert(), + // and erase(). Otherwise it may break probing. + ++numProbes; + if (UNLIKELY(numProbes >= capacity_)) { + // probed every cell...fail + return SimpleRetT(capacity_, false); + } + } +} + +/* + * insertInternal -- + * + * Returns false on failure due to key collision or full. + * Also sets ret.index to the index of the key. If the map is full, sets + * ret.index = capacity_. Also sets ret.second to cell value, thus if insert + * successful this will be what we just inserted, if there is a key collision + * this will be the previously inserted value, and if the map is full it is + * default. + */ +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +template < + typename LookupKeyT, + typename LookupHashFcn, + typename LookupEqualFcn, + typename LookupKeyToKeyFcn, + typename... 
ArgTs> +typename AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::SimpleRetT +AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) { + const short NO_NEW_INSERTS = 1; + const short NO_PENDING_INSERTS = 2; + checkLegalKeyIfKey(key_in); + + size_t idx = keyToAnchorIdx(key_in); + size_t numProbes = 0; + for (;;) { + DCHECK_LT(idx, capacity_); + value_type* cell = &cells_[idx]; + if (relaxedLoadKey(*cell) == kEmptyKey_) { + // NOTE: isFull_ is set based on numEntries_.readFast(), so it's + // possible to insert more than maxEntries_ entries. However, it's not + // possible to insert past capacity_. + ++numPendingEntries_; + if (isFull_.load(std::memory_order_acquire)) { + --numPendingEntries_; + + // Before deciding whether this insert succeeded, this thread needs to + // wait until no other thread can add a new entry. + + // Correctness assumes isFull_ is true at this point. If + // another thread now does ++numPendingEntries_, we expect it + // to pass the isFull_.load() test above. (It shouldn't insert + // a new entry.) + detail::atomic_hash_spin_wait([&] { + return (isFull_.load(std::memory_order_acquire) != + NO_PENDING_INSERTS) && + (numPendingEntries_.readFull() != 0); + }); + isFull_.store(NO_PENDING_INSERTS, std::memory_order_release); + + if (relaxedLoadKey(*cell) == kEmptyKey_) { + // Don't insert past max load factor + return SimpleRetT(capacity_, false); + } + } else { + // An unallocated cell. Try once to lock it. If we succeed, insert here. + // If we fail, fall through to comparison below; maybe the insert that + // just beat us was for this very key.... 
+ if (tryLockCell(cell)) { + KeyT key_new; + // Write the value - done before unlocking + try { + key_new = LookupKeyToKeyFcn()(key_in); + typedef + typename std::remove_const::type LookupKeyTNoConst; + constexpr bool kAlreadyChecked = + std::is_same::value; + if (!kAlreadyChecked) { + checkLegalKeyIfKey(key_new); + } + DCHECK(relaxedLoadKey(*cell) == kLockedKey_); + // A const mapped_type is only constant once constructed, so cast + // away any const for the placement new here. + using mapped = typename std::remove_const::type; + new (const_cast(&cell->second)) + ValueT(std::forward(vCtorArgs)...); + unlockCell(cell, key_new); // Sets the new key + } catch (...) { + // Transition back to empty key---requires handling + // locked->empty below. + unlockCell(cell, kEmptyKey_); + --numPendingEntries_; + throw; + } + // An erase() can race here and delete right after our insertion + // Direct comparison rather than EqualFcn ok here + // (we just inserted it) + DCHECK( + relaxedLoadKey(*cell) == key_new || + relaxedLoadKey(*cell) == kErasedKey_); + --numPendingEntries_; + ++numEntries_; // This is a thread cached atomic increment :) + if (numEntries_.readFast() >= maxEntries_) { + isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed); + } + return SimpleRetT(idx, true); + } + --numPendingEntries_; + } + } + DCHECK(relaxedLoadKey(*cell) != kEmptyKey_); + if (kLockedKey_ == acquireLoadKey(*cell)) { + detail::atomic_hash_spin_wait( + [&] { return kLockedKey_ == acquireLoadKey(*cell); }); + } + + const KeyT thisKey = acquireLoadKey(*cell); + if (LookupEqualFcn()(thisKey, key_in)) { + // Found an existing entry for our key, but we don't overwrite the + // previous value. + return SimpleRetT(idx, false); + } else if (thisKey == kEmptyKey_ || thisKey == kLockedKey_) { + // We need to try again (i.e., don't increment numProbes or + // advance idx): this case can happen if the constructor for + // ValueT threw for this very cell (the rethrow block above). 
+ continue; + } + + // NOTE: the way we count numProbes must be same in find(), + // insert(), and erase(). Otherwise it may break probing. + ++numProbes; + if (UNLIKELY(numProbes >= capacity_)) { + // probed every cell...fail + return SimpleRetT(capacity_, false); + } + + idx = ProbeFcn()(idx, numProbes, capacity_); + } +} + +/* + * erase -- + * + * This will attempt to erase the given key key_in if the key is found. It + * returns 1 iff the key was located and marked as erased, and 0 otherwise. + * + * Memory is not freed or reclaimed by erase, i.e. the cell containing the + * erased key will never be reused. If there's an associated value, we won't + * touch it either. + */ +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +size_t AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::erase(KeyT key_in) { + CHECK_NE(key_in, kEmptyKey_); + CHECK_NE(key_in, kLockedKey_); + CHECK_NE(key_in, kErasedKey_); + + for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;; + idx = ProbeFcn()(idx, numProbes, capacity_)) { + DCHECK_LT(idx, capacity_); + value_type* cell = &cells_[idx]; + KeyT currentKey = acquireLoadKey(*cell); + if (currentKey == kEmptyKey_ || currentKey == kLockedKey_) { + // If we hit an empty (or locked) element, this key does not exist. This + // is similar to how it's handled in find(). + return 0; + } + if (EqualFcn()(currentKey, key_in)) { + // Found an existing entry for our key, attempt to mark it erased. + // Some other thread may have erased our key, but this is ok. + KeyT expect = currentKey; + if (cellKeyPtr(*cell)->compare_exchange_strong(expect, kErasedKey_)) { + numErases_.fetch_add(1, std::memory_order_relaxed); + + // Even if there's a value in the cell, we won't delete (or even + // default construct) it because some other thread may be accessing it. 
+ // Locking it meanwhile won't work either since another thread may be + // holding a pointer to it. + + // We found the key and successfully erased it. + return 1; + } + // If another thread succeeds in erasing our key, we'll stop our search. + return 0; + } + + // NOTE: the way we count numProbes must be same in find(), insert(), + // and erase(). Otherwise it may break probing. + ++numProbes; + if (UNLIKELY(numProbes >= capacity_)) { + // probed every cell...fail + return 0; + } + } +} + +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +typename AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::SmartPtr +AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::create(size_t maxSize, const Config& c) { + CHECK_LE(c.maxLoadFactor, 1.0); + CHECK_GT(c.maxLoadFactor, 0.0); + CHECK_NE(c.emptyKey, c.lockedKey); + size_t capacity = size_t(maxSize / c.maxLoadFactor); + size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity; + + auto const mem = Allocator().allocate(sz); + try { + new (mem) AtomicHashArray( + capacity, + c.emptyKey, + c.lockedKey, + c.erasedKey, + c.maxLoadFactor, + c.entryCountThreadCacheSize); + } catch (...) { + Allocator().deallocate(mem, sz); + throw; + } + + SmartPtr map(static_cast((void*)mem)); + + /* + * Mark all cells as empty. + * + * Note: we're bending the rules a little here accessing the key + * element in our cells even though the cell object has not been + * constructed, and casting them to atomic objects (see cellKeyPtr). + * (Also, in fact we never actually invoke the value_type + * constructor.) This is in order to avoid needing to default + * construct a bunch of value_type when we first start up: if you + * have an expensive default constructor for the value type this can + * noticeably speed construction time for an AHA. 
+ */ + FOR_EACH_RANGE (i, 0, map->capacity_) { + cellKeyPtr(map->cells_[i]) + ->store(map->kEmptyKey_, std::memory_order_relaxed); + } + return map; +} + +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +void AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::destroy(AtomicHashArray* p) { + assert(p); + + size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_; + + FOR_EACH_RANGE (i, 0, p->capacity_) { + if (p->cells_[i].first != p->kEmptyKey_) { + p->cells_[i].~value_type(); + } + } + p->~AtomicHashArray(); + + Allocator().deallocate((char*)p, sz); +} + +// clear -- clears all keys and values in the map and resets all counters +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +void AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::clear() { + FOR_EACH_RANGE (i, 0, capacity_) { + if (cells_[i].first != kEmptyKey_) { + cells_[i].~value_type(); + *const_cast(&cells_[i].first) = kEmptyKey_; + } + CHECK(cells_[i].first == kEmptyKey_); + } + numEntries_.set(0); + numPendingEntries_.set(0); + isFull_.store(0, std::memory_order_relaxed); + numErases_.store(0, std::memory_order_relaxed); +} + +// Iterator implementation + +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +template +struct AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::aha_iterator + : detail::IteratorFacade< + aha_iterator, + IterVal, + std::forward_iterator_tag> { + explicit aha_iterator() : aha_(nullptr) {} + + // Conversion ctor for interoperability between const_iterator and + // iterator. The enable_if<> magic keeps us well-behaved for + // is_convertible<> (v. 
the iterator_facade documentation). + template + aha_iterator( + const aha_iterator& o, + typename std::enable_if< + std::is_convertible::value>::type* = nullptr) + : aha_(o.aha_), offset_(o.offset_) {} + + explicit aha_iterator(ContT* array, size_t offset) + : aha_(array), offset_(offset) {} + + // Returns unique index that can be used with findAt(). + // WARNING: The following function will fail silently for hashtable + // with capacity > 2^32 + uint32_t getIndex() const { + return offset_; + } + + void advancePastEmpty() { + while (offset_ < aha_->capacity_ && !isValid()) { + ++offset_; + } + } + + private: + friend class AtomicHashArray; + friend class detail:: + IteratorFacade; + + void increment() { + ++offset_; + advancePastEmpty(); + } + + bool equal(const aha_iterator& o) const { + return aha_ == o.aha_ && offset_ == o.offset_; + } + + IterVal& dereference() const { + return aha_->cells_[offset_]; + } + + bool isValid() const { + KeyT key = acquireLoadKey(aha_->cells_[offset_]); + return key != aha_->kEmptyKey_ && key != aha_->kLockedKey_ && + key != aha_->kErasedKey_; + } + + private: + ContT* aha_; + size_t offset_; +}; // aha_iterator + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/AtomicHashArray.h b/native/iosTest/Pods/Folly/folly/AtomicHashArray.h new file mode 100644 index 000000000..cd62a2329 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicHashArray.h @@ -0,0 +1,448 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AtomicHashArray is the building block for AtomicHashMap. It provides the + * core lock-free functionality, but is limited by the fact that it cannot + * grow past its initialization size and is a little more awkward (no public + * constructor, for example). If you're confident that you won't run out of + * space, don't mind the awkardness, and really need bare-metal performance, + * feel free to use AHA directly. + * + * Check out AtomicHashMap.h for more thorough documentation on perf and + * general pros and cons relative to other hash maps. + * + * @author Spencer Ahrens + * @author Jordan DeLong + */ + +#pragma once +#define FOLLY_ATOMICHASHARRAY_H_ + +#include + +#include +#include +#include + +namespace folly { + +struct AtomicHashArrayLinearProbeFcn { + inline size_t operator()(size_t idx, size_t /* numProbes */, size_t capacity) + const { + idx += 1; // linear probing + + // Avoid modulus because it's slow + return LIKELY(idx < capacity) ? idx : (idx - capacity); + } +}; + +struct AtomicHashArrayQuadraticProbeFcn { + inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) + const { + idx += numProbes; // quadratic probing + + // Avoid modulus because it's slow + return LIKELY(idx < capacity) ? idx : (idx - capacity); + } +}; + +// Enables specializing checkLegalKey without specializing its class. 
+namespace detail { +template +inline void checkLegalKeyIfKeyTImpl( + NotKeyT /* ignored */, + KeyT /* emptyKey */, + KeyT /* lockedKey */, + KeyT /* erasedKey */) {} + +template +inline void checkLegalKeyIfKeyTImpl( + KeyT key_in, + KeyT emptyKey, + KeyT lockedKey, + KeyT erasedKey) { + DCHECK_NE(key_in, emptyKey); + DCHECK_NE(key_in, lockedKey); + DCHECK_NE(key_in, erasedKey); +} +} // namespace detail + +template < + class KeyT, + class ValueT, + class HashFcn = std::hash, + class EqualFcn = std::equal_to, + class Allocator = std::allocator, + class ProbeFcn = AtomicHashArrayLinearProbeFcn, + class KeyConvertFcn = Identity> +class AtomicHashMap; + +template < + class KeyT, + class ValueT, + class HashFcn = std::hash, + class EqualFcn = std::equal_to, + class Allocator = std::allocator, + class ProbeFcn = AtomicHashArrayLinearProbeFcn, + class KeyConvertFcn = Identity> +class AtomicHashArray { + static_assert( + (std::is_convertible::value || + std::is_convertible::value || + std::is_convertible::value), + "You are trying to use AtomicHashArray with disallowed key " + "types. 
You must use atomically compare-and-swappable integer " + "keys, or a different container class."); + + public: + typedef KeyT key_type; + typedef ValueT mapped_type; + typedef HashFcn hasher; + typedef EqualFcn key_equal; + typedef KeyConvertFcn key_convert; + typedef std::pair value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + const size_t capacity_; + const size_t maxEntries_; + const KeyT kEmptyKey_; + const KeyT kLockedKey_; + const KeyT kErasedKey_; + + template + struct aha_iterator; + + typedef aha_iterator const_iterator; + typedef aha_iterator iterator; + + // You really shouldn't need this if you use the SmartPtr provided by create, + // but if you really want to do something crazy like stick the released + // pointer into a DescriminatedPtr or something, you'll need this to clean up + // after yourself. + static void destroy(AtomicHashArray*); + + private: + const size_t kAnchorMask_; + + struct Deleter { + void operator()(AtomicHashArray* ptr) { + AtomicHashArray::destroy(ptr); + } + }; + + public: + typedef std::unique_ptr SmartPtr; + + /* + * create -- + * + * Creates AtomicHashArray objects. Use instead of constructor/destructor. + * + * We do things this way in order to avoid the perf penalty of a second + * pointer indirection when composing these into AtomicHashMap, which needs + * to store an array of pointers so that it can perform atomic operations on + * them when growing. + * + * Instead of a mess of arguments, we take a max size and a Config struct to + * simulate named ctor parameters. The Config struct has sensible defaults + * for everything, but is overloaded - if you specify a positive capacity, + * that will be used directly instead of computing it based on + * maxLoadFactor. 
+ * + * Create returns an AHA::SmartPtr which is a unique_ptr with a custom + * deleter to make sure everything is cleaned up properly. + */ + struct Config { + KeyT emptyKey; + KeyT lockedKey; + KeyT erasedKey; + double maxLoadFactor; + double growthFactor; + uint32_t entryCountThreadCacheSize; + size_t capacity; // if positive, overrides maxLoadFactor + + // Cannot have constexpr ctor because some compilers rightly complain. + Config() + : emptyKey((KeyT)-1), + lockedKey((KeyT)-2), + erasedKey((KeyT)-3), + maxLoadFactor(0.8), + growthFactor(-1), + entryCountThreadCacheSize(1000), + capacity(0) {} + }; + + // Cannot have pre-instantiated const Config instance because of SIOF. + static SmartPtr create(size_t maxSize, const Config& c = Config()); + + /* + * find -- + * + * + * Returns the iterator to the element if found, otherwise end(). + * + * As an optional feature, the type of the key to look up (LookupKeyT) is + * allowed to be different from the type of keys actually stored (KeyT). + * + * This enables use cases where materializing the key is costly and usually + * redudant, e.g., canonicalizing/interning a set of strings and being able + * to look up by StringPiece. To use this feature, LookupHashFcn must take + * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first + * and second parameter, respectively. + * + * See folly/test/ArrayHashArrayTest.cpp for sample usage. + */ + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal> + iterator find(LookupKeyT k) { + return iterator( + this, findInternal(k).idx); + } + + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal> + const_iterator find(LookupKeyT k) const { + return const_cast(this) + ->find(k); + } + + /* + * insert -- + * + * Returns a pair with iterator to the element at r.first and bool success. + * Retrieve the index with ret.first.getIndex(). 
+ * + * Fails on key collision (does not overwrite) or if map becomes + * full, at which point no element is inserted, iterator is set to end(), + * and success is set false. On collisions, success is set false, but the + * iterator is set to the existing entry. + */ + std::pair insert(const value_type& r) { + return emplace(r.first, r.second); + } + std::pair insert(value_type&& r) { + return emplace(r.first, std::move(r.second)); + } + + /* + * emplace -- + * + * Same contract as insert(), but performs in-place construction + * of the value type using the specified arguments. + * + * Also, like find(), this method optionally allows 'key_in' to have a type + * different from that stored in the table; see find(). If and only if no + * equal key is already present, this method converts 'key_in' to a key of + * type KeyT using the provided LookupKeyToKeyFcn. + */ + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal, + typename LookupKeyToKeyFcn = key_convert, + typename... ArgTs> + std::pair emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) { + SimpleRetT ret = insertInternal< + LookupKeyT, + LookupHashFcn, + LookupEqualFcn, + LookupKeyToKeyFcn>(key_in, std::forward(vCtorArgs)...); + return std::make_pair(iterator(this, ret.idx), ret.success); + } + + // returns the number of elements erased - should never exceed 1 + size_t erase(KeyT k); + + // clears all keys and values in the map and resets all counters. Not thread + // safe. + void clear(); + + // Exact number of elements in the map - note that readFull() acquires a + // mutex. See folly/ThreadCachedInt.h for more details. 
+ size_t size() const { + return numEntries_.readFull() - numErases_.load(std::memory_order_relaxed); + } + + bool empty() const { + return size() == 0; + } + + iterator begin() { + iterator it(this, 0); + it.advancePastEmpty(); + return it; + } + const_iterator begin() const { + const_iterator it(this, 0); + it.advancePastEmpty(); + return it; + } + + iterator end() { + return iterator(this, capacity_); + } + const_iterator end() const { + return const_iterator(this, capacity_); + } + + // See AtomicHashMap::findAt - access elements directly + // WARNING: The following 2 functions will fail silently for hashtable + // with capacity > 2^32 + iterator findAt(uint32_t idx) { + DCHECK_LT(idx, capacity_); + return iterator(this, idx); + } + const_iterator findAt(uint32_t idx) const { + return const_cast(this)->findAt(idx); + } + + iterator makeIter(size_t idx) { + return iterator(this, idx); + } + const_iterator makeIter(size_t idx) const { + return const_iterator(this, idx); + } + + // The max load factor allowed for this map + double maxLoadFactor() const { + return ((double)maxEntries_) / capacity_; + } + + void setEntryCountThreadCacheSize(uint32_t newSize) { + numEntries_.setCacheSize(newSize); + numPendingEntries_.setCacheSize(newSize); + } + + uint32_t getEntryCountThreadCacheSize() const { + return numEntries_.getCacheSize(); + } + + /* Private data and helper functions... */ + + private: + friend class AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn>; + + struct SimpleRetT { + size_t idx; + bool success; + SimpleRetT(size_t i, bool s) : idx(i), success(s) {} + SimpleRetT() = default; + }; + + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal, + typename LookupKeyToKeyFcn = Identity, + typename... ArgTs> + SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... 
vCtorArgs); + + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal> + SimpleRetT findInternal(const LookupKeyT key); + + template + void checkLegalKeyIfKey(MaybeKeyT key) { + detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_); + } + + static std::atomic* cellKeyPtr(const value_type& r) { + // We need some illegal casting here in order to actually store + // our value_type as a std::pair. But a little bit of + // undefined behavior never hurt anyone ... + static_assert( + sizeof(std::atomic) == sizeof(KeyT), + "std::atomic is implemented in an unexpected way for AHM"); + return const_cast*>( + reinterpret_cast const*>(&r.first)); + } + + static KeyT relaxedLoadKey(const value_type& r) { + return cellKeyPtr(r)->load(std::memory_order_relaxed); + } + + static KeyT acquireLoadKey(const value_type& r) { + return cellKeyPtr(r)->load(std::memory_order_acquire); + } + + // Fun with thread local storage - atomic increment is expensive + // (relatively), so we accumulate in the thread cache and periodically + // flush to the actual variable, and walk through the unflushed counts when + // reading the value, so be careful of calling size() too frequently. This + // increases insertion throughput several times over while keeping the count + // accurate. 
+ ThreadCachedInt numEntries_; // Successful key inserts + ThreadCachedInt numPendingEntries_; // Used by insertInternal + std::atomic isFull_; // Used by insertInternal + std::atomic numErases_; // Successful key erases + + value_type cells_[0]; // This must be the last field of this class + + // Force constructor/destructor private since create/destroy should be + // used externally instead + AtomicHashArray( + size_t capacity, + KeyT emptyKey, + KeyT lockedKey, + KeyT erasedKey, + double maxLoadFactor, + uint32_t cacheSize); + + AtomicHashArray(const AtomicHashArray&) = delete; + AtomicHashArray& operator=(const AtomicHashArray&) = delete; + + ~AtomicHashArray() = default; + + inline void unlockCell(value_type* const cell, KeyT newKey) { + cellKeyPtr(*cell)->store(newKey, std::memory_order_release); + } + + inline bool tryLockCell(value_type* const cell) { + KeyT expect = kEmptyKey_; + return cellKeyPtr(*cell)->compare_exchange_strong( + expect, kLockedKey_, std::memory_order_acq_rel); + } + + template + inline size_t keyToAnchorIdx(const LookupKeyT k) const { + const size_t hashVal = LookupHashFcn()(k); + const size_t probe = hashVal & kAnchorMask_; + return LIKELY(probe < capacity_) ? probe : hashVal % capacity_; + } + +}; // AtomicHashArray + +} // namespace folly + +#include diff --git a/native/iosTest/Pods/Folly/folly/AtomicHashMap-inl.h b/native/iosTest/Pods/Folly/folly/AtomicHashMap-inl.h new file mode 100644 index 000000000..f15f07e2b --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicHashMap-inl.h @@ -0,0 +1,657 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FOLLY_ATOMICHASHMAP_H_ +#error "This should only be included by AtomicHashMap.h" +#endif + +#include +#include + +#include + +namespace folly { + +// AtomicHashMap constructor -- Atomic wrapper that allows growth +// This class has a lot of overhead (184 Bytes) so only use for big maps +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config) + : kGrowthFrac_( + config.growthFactor < 0 ? 1.0f - config.maxLoadFactor + : config.growthFactor) { + CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f); + subMaps_[0].store( + SubMap::create(finalSizeEst, config).release(), + std::memory_order_relaxed); + auto subMapCount = kNumSubMaps_; + FOR_EACH_RANGE (i, 1, subMapCount) { + subMaps_[i].store(nullptr, std::memory_order_relaxed); + } + numMapsAllocated_.store(1, std::memory_order_relaxed); +} + +// emplace -- +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +template < + typename LookupKeyT, + typename LookupHashFcn, + typename LookupEqualFcn, + typename LookupKeyToKeyFcn, + typename... 
ArgTs> +std::pair< + typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::iterator, + bool> +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::emplace(LookupKeyT k, ArgTs&&... vCtorArgs) { + SimpleRetT ret = insertInternal< + LookupKeyT, + LookupHashFcn, + LookupEqualFcn, + LookupKeyToKeyFcn>(k, std::forward(vCtorArgs)...); + SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed); + return std::make_pair( + iterator(this, ret.i, subMap->makeIter(ret.j)), ret.success); +} + +// insertInternal -- Allocates new sub maps as existing ones fill up. +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +template < + typename LookupKeyT, + typename LookupHashFcn, + typename LookupEqualFcn, + typename LookupKeyToKeyFcn, + typename... ArgTs> +typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::SimpleRetT +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) { +beginInsertInternal: + auto nextMapIdx = // this maintains our state + numMapsAllocated_.load(std::memory_order_acquire); + typename SubMap::SimpleRetT ret; + FOR_EACH_RANGE (i, 0, nextMapIdx) { + // insert in each map successively. If one succeeds, we're done! 
+ SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed); + ret = subMap->template insertInternal< + LookupKeyT, + LookupHashFcn, + LookupEqualFcn, + LookupKeyToKeyFcn>(key, std::forward(vCtorArgs)...); + if (ret.idx == subMap->capacity_) { + continue; // map is full, so try the next one + } + // Either collision or success - insert in either case + return SimpleRetT(i, ret.idx, ret.success); + } + + // If we made it this far, all maps are full and we need to try to allocate + // the next one. + + SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed); + if (nextMapIdx >= kNumSubMaps_ || + primarySubMap->capacity_ * kGrowthFrac_ < 1.0) { + // Can't allocate any more sub maps. + throw AtomicHashMapFullError(); + } + + if (tryLockMap(nextMapIdx)) { + // Alloc a new map and shove it in. We can change whatever + // we want because other threads are waiting on us... + size_t numCellsAllocated = (size_t)( + primarySubMap->capacity_ * + std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1)); + size_t newSize = size_t(numCellsAllocated * kGrowthFrac_); + DCHECK( + subMaps_[nextMapIdx].load(std::memory_order_relaxed) == + (SubMap*)kLockedPtr_); + // create a new map using the settings stored in the first map + + Config config; + config.emptyKey = primarySubMap->kEmptyKey_; + config.lockedKey = primarySubMap->kLockedKey_; + config.erasedKey = primarySubMap->kErasedKey_; + config.maxLoadFactor = primarySubMap->maxLoadFactor(); + config.entryCountThreadCacheSize = + primarySubMap->getEntryCountThreadCacheSize(); + subMaps_[nextMapIdx].store( + SubMap::create(newSize, config).release(), std::memory_order_relaxed); + + // Publish the map to other threads. + numMapsAllocated_.fetch_add(1, std::memory_order_release); + DCHECK_EQ( + nextMapIdx + 1, numMapsAllocated_.load(std::memory_order_relaxed)); + } else { + // If we lost the race, we'll have to wait for the next map to get + // allocated before doing any insertion here. 
+ detail::atomic_hash_spin_wait([&] { + return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire); + }); + } + + // Relaxed is ok here because either we just created this map, or we + // just did a spin wait with an acquire load on numMapsAllocated_. + SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed); + DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_); + ret = loadedMap->insertInternal(key, std::forward(vCtorArgs)...); + if (ret.idx != loadedMap->capacity_) { + return SimpleRetT(nextMapIdx, ret.idx, ret.success); + } + // We took way too long and the new map is already full...try again from + // the top (this should pretty much never happen). + goto beginInsertInternal; +} + +// find -- +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +template +typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::iterator +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::find(LookupKeyT k) { + SimpleRetT ret = findInternal(k); + if (!ret.success) { + return end(); + } + SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed); + return iterator(this, ret.i, subMap->makeIter(ret.j)); +} + +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +template +typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::const_iterator +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::find(LookupKeyT k) const { + return const_cast(this) + ->find(k); +} + +// findInternal -- +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> 
+template +typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::SimpleRetT +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::findInternal(const LookupKeyT k) const { + SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed); + typename SubMap::SimpleRetT ret = + primaryMap + ->template findInternal(k); + if (LIKELY(ret.idx != primaryMap->capacity_)) { + return SimpleRetT(0, ret.idx, ret.success); + } + const unsigned int numMaps = + numMapsAllocated_.load(std::memory_order_acquire); + FOR_EACH_RANGE (i, 1, numMaps) { + // Check each map successively. If one succeeds, we're done! + SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed); + ret = + thisMap + ->template findInternal( + k); + if (LIKELY(ret.idx != thisMap->capacity_)) { + return SimpleRetT(i, ret.idx, ret.success); + } + } + // Didn't find our key... + return SimpleRetT(numMaps, 0, false); +} + +// findAtInternal -- see encodeIndex() for details. 
+template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::SimpleRetT +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::findAtInternal(uint32_t idx) const { + uint32_t subMapIdx, subMapOffset; + if (idx & kSecondaryMapBit_) { + // idx falls in a secondary map + idx &= ~kSecondaryMapBit_; // unset secondary bit + subMapIdx = idx >> kSubMapIndexShift_; + DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed)); + subMapOffset = idx & kSubMapIndexMask_; + } else { + // idx falls in primary map + subMapIdx = 0; + subMapOffset = idx; + } + return SimpleRetT(subMapIdx, subMapOffset, true); +} + +// erase -- +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +typename AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::size_type +AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::erase(const KeyT k) { + int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); + FOR_EACH_RANGE (i, 0, numMaps) { + // Check each map successively. If one succeeds, we're done! + if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) { + return 1; + } + } + // Didn't find our key... 
+ return 0; +} + +// capacity -- summation of capacities of all submaps +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +size_t AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::capacity() const { + size_t totalCap(0); + int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); + FOR_EACH_RANGE (i, 0, numMaps) { + totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_; + } + return totalCap; +} + +// spaceRemaining -- +// number of new insertions until current submaps are all at max load +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +size_t AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::spaceRemaining() const { + size_t spaceRem(0); + int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); + FOR_EACH_RANGE (i, 0, numMaps) { + SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed); + spaceRem += + std::max(0, thisMap->maxEntries_ - &thisMap->numEntries_.readFull()); + } + return spaceRem; +} + +// clear -- Wipes all keys and values from primary map and destroys +// all secondary maps. Not thread safe. 
+template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +void AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::clear() { + subMaps_[0].load(std::memory_order_relaxed)->clear(); + int const numMaps = numMapsAllocated_.load(std::memory_order_relaxed); + FOR_EACH_RANGE (i, 1, numMaps) { + SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed); + DCHECK(thisMap); + SubMap::destroy(thisMap); + subMaps_[i].store(nullptr, std::memory_order_relaxed); + } + numMapsAllocated_.store(1, std::memory_order_relaxed); +} + +// size -- +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +size_t AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::size() const { + size_t totalSize(0); + int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); + FOR_EACH_RANGE (i, 0, numMaps) { + totalSize += subMaps_[i].load(std::memory_order_relaxed)->size(); + } + return totalSize; +} + +// encodeIndex -- Encode the submap index and offset into return. +// index_ret must be pre-populated with the submap offset. +// +// We leave index_ret untouched when referring to the primary map +// so it can be as large as possible (31 data bits). Max size of +// secondary maps is limited by what can fit in the low 27 bits. 
+// +// Returns the following bit-encoded data in index_ret: +// if subMap == 0 (primary map) => +// bit(s) value +// 31 0 +// 0-30 submap offset (index_ret input) +// +// if subMap > 0 (secondary maps) => +// bit(s) value +// 31 1 +// 27-30 which subMap +// 0-26 subMap offset (index_ret input) +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +inline uint32_t AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::encodeIndex(uint32_t subMap, uint32_t offset) { + DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big + if (subMap == 0) { + return offset; + } + // Make sure subMap isn't too big + DCHECK_EQ(subMap >> kNumSubMapBits_, 0); + // Make sure subMap bits of offset are clear + DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0); + + // Set high-order bits to encode which submap this index belongs to + return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_; +} + +// Iterator implementation + +template < + typename KeyT, + typename ValueT, + typename HashFcn, + typename EqualFcn, + typename Allocator, + typename ProbeFcn, + typename KeyConvertFcn> +template +struct AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn>::ahm_iterator + : detail::IteratorFacade< + ahm_iterator, + IterVal, + std::forward_iterator_tag> { + explicit ahm_iterator() : ahm_(nullptr) {} + + // Conversion ctor for interoperability between const_iterator and + // iterator. The enable_if<> magic keeps us well-behaved for + // is_convertible<> (v. the iterator_facade documentation). 
+ template + ahm_iterator( + const ahm_iterator& o, + typename std::enable_if< + std::is_convertible::value>::type* = nullptr) + : ahm_(o.ahm_), subMap_(o.subMap_), subIt_(o.subIt_) {} + + /* + * Returns the unique index that can be used for access directly + * into the data storage. + */ + uint32_t getIndex() const { + CHECK(!isEnd()); + return ahm_->encodeIndex(subMap_, subIt_.getIndex()); + } + + private: + friend class AtomicHashMap; + explicit ahm_iterator(ContT* ahm, uint32_t subMap, const SubIt& subIt) + : ahm_(ahm), subMap_(subMap), subIt_(subIt) {} + + friend class detail:: + IteratorFacade; + + void increment() { + CHECK(!isEnd()); + ++subIt_; + checkAdvanceToNextSubmap(); + } + + bool equal(const ahm_iterator& other) const { + if (ahm_ != other.ahm_) { + return false; + } + + if (isEnd() || other.isEnd()) { + return isEnd() == other.isEnd(); + } + + return subMap_ == other.subMap_ && subIt_ == other.subIt_; + } + + IterVal& dereference() const { + return *subIt_; + } + + bool isEnd() const { + return ahm_ == nullptr; + } + + void checkAdvanceToNextSubmap() { + if (isEnd()) { + return; + } + + SubMap* thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed); + while (subIt_ == thisMap->end()) { + // This sub iterator is done, advance to next one + if (subMap_ + 1 < + ahm_->numMapsAllocated_.load(std::memory_order_acquire)) { + ++subMap_; + thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed); + subIt_ = thisMap->begin(); + } else { + ahm_ = nullptr; + return; + } + } + } + + private: + ContT* ahm_; + uint32_t subMap_; + SubIt subIt_; +}; // ahm_iterator + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/AtomicHashMap.h b/native/iosTest/Pods/Folly/folly/AtomicHashMap.h new file mode 100644 index 000000000..a98d0866f --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicHashMap.h @@ -0,0 +1,500 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * AtomicHashMap -- + * + * A high-performance concurrent hash map with int32 or int64 keys. Supports + * insert, find(key), findAt(index), erase(key), size, and more. Memory cannot + * be freed or reclaimed by erase. Can grow to a maximum of about 18 times the + * initial capacity, but performance degrades linearly with growth. Can also be + * used as an object store with unique 32-bit references directly into the + * internal storage (retrieved with iterator::getIndex()). + * + * Advantages: + * - High-performance (~2-4x tbb::concurrent_hash_map in heavily + * multi-threaded environments). + * - Efficient memory usage if initial capacity is not over estimated + * (especially for small keys and values). + * - Good fragmentation properties (only allocates in large slabs which can + * be reused with clear() and never move). + * - Can generate unique, long-lived 32-bit references for efficient lookup + * (see findAt()). + * + * Disadvantages: + * - Keys must be native int32 or int64, or explicitly converted. + * - Must be able to specify unique empty, locked, and erased keys + * - Performance degrades linearly as size grows beyond initialization + * capacity. + * - Max size limit of ~18x initial size (dependent on max load factor). + * - Memory is not freed or reclaimed by erase. + * + * Usage and Operation Details: + * Simple performance/memory tradeoff with maxLoadFactor. 
Higher load factors + * give better memory utilization but probe lengths increase, reducing + * performance. + * + * Implementation and Performance Details: + * AHArray is a fixed size contiguous block of value_type cells. When + * writing a cell, the key is locked while the rest of the record is + * written. Once done, the cell is unlocked by setting the key. find() + * is completely wait-free and doesn't require any non-relaxed atomic + * operations. AHA cannot grow beyond initialization capacity, but is + * faster because of reduced data indirection. + * + * AHMap is a wrapper around AHArray sub-maps that allows growth and provides + * an interface closer to the STL UnorderedAssociativeContainer concept. These + * sub-maps are allocated on the fly and are processed in series, so the more + * there are (from growing past initial capacity), the worse the performance. + * + * Insert returns false if there is a key collision and throws if the max size + * of the map is exceeded. + * + * Benchmark performance with 8 simultaneous threads processing 1 million + * unique entries on a 4-core, 2.5 GHz machine: + * + * Load Factor Mem Efficiency usec/Insert usec/Find + * 50% 50% 0.19 0.05 + * 85% 85% 0.20 0.06 + * 90% 90% 0.23 0.08 + * 95% 95% 0.27 0.10 + * + * See folly/tests/AtomicHashMapTest.cpp for more benchmarks. + * + * @author Spencer Ahrens + * @author Jordan DeLong + * + */ + +#pragma once +#define FOLLY_ATOMICHASHMAP_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace folly { + +/* + * AtomicHashMap provides an interface somewhat similar to the + * UnorderedAssociativeContainer concept in C++. This does not + * exactly match this concept (or even the basic Container concept), + * because of some restrictions imposed by our datastructure. + * + * Specific differences (there are quite a few): + * + * - Efficiently thread safe for inserts (main point of this stuff), + * wait-free for lookups. 
+ * + * - You can erase from this container, but the cell containing the key will + * not be free or reclaimed. + * + * - You can erase everything by calling clear() (and you must guarantee only + * one thread can be using the container to do that). + * + * - We aren't DefaultConstructible, CopyConstructible, Assignable, or + * EqualityComparable. (Most of these are probably not something + * you actually want to do with this anyway.) + * + * - We don't support the various bucket functions, rehash(), + * reserve(), or equal_range(). Also no constructors taking + * iterators, although this could change. + * + * - Several insertion functions, notably operator[], are not + * implemented. It is a little too easy to misuse these functions + * with this container, where part of the point is that when an + * insertion happens for a new key, it will atomically have the + * desired value. + * + * - The map has no templated insert() taking an iterator range, but + * we do provide an insert(key, value). The latter seems more + * frequently useful for this container (to avoid sprinkling + * make_pair everywhere), and providing both can lead to some gross + * template error messages. + * + * - The Allocator must not be stateful (a new instance will be spun up for + * each allocation), and its allocate() method must take a raw number of + * bytes. + * + * - KeyT must be a 32 bit or 64 bit atomic integer type, and you must + * define special 'locked' and 'empty' key values in the ctor + * + * - We don't take the Hash function object as an instance in the + * constructor. + * + */ + +// Thrown when insertion fails due to running out of space for +// submaps. 
+struct FOLLY_EXPORT AtomicHashMapFullError : std::runtime_error { + explicit AtomicHashMapFullError() + : std::runtime_error("AtomicHashMap is full") {} +}; + +template < + class KeyT, + class ValueT, + class HashFcn, + class EqualFcn, + class Allocator, + class ProbeFcn, + class KeyConvertFcn> +class AtomicHashMap { + typedef AtomicHashArray< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + ProbeFcn, + KeyConvertFcn> + SubMap; + + public: + typedef KeyT key_type; + typedef ValueT mapped_type; + typedef std::pair value_type; + typedef HashFcn hasher; + typedef EqualFcn key_equal; + typedef KeyConvertFcn key_convert; + typedef value_type* pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef std::ptrdiff_t difference_type; + typedef std::size_t size_type; + typedef typename SubMap::Config Config; + + template + struct ahm_iterator; + + typedef ahm_iterator< + const AtomicHashMap, + const value_type, + typename SubMap::const_iterator> + const_iterator; + typedef ahm_iterator + iterator; + + public: + const float kGrowthFrac_; // How much to grow when we run out of capacity. + + // The constructor takes a finalSizeEst which is the optimal + // number of elements to maximize space utilization and performance, + // and a Config object to specify more advanced options. + explicit AtomicHashMap(size_t finalSizeEst, const Config& c = Config()); + + AtomicHashMap(const AtomicHashMap&) = delete; + AtomicHashMap& operator=(const AtomicHashMap&) = delete; + + ~AtomicHashMap() { + const unsigned int numMaps = + numMapsAllocated_.load(std::memory_order_relaxed); + FOR_EACH_RANGE (i, 0, numMaps) { + SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed); + DCHECK(thisMap); + SubMap::destroy(thisMap); + } + } + + key_equal key_eq() const { + return key_equal(); + } + hasher hash_function() const { + return hasher(); + } + + /* + * insert -- + * + * Returns a pair with iterator to the element at r.first and + * success. 
Retrieve the index with ret.first.getIndex(). + * + * Does not overwrite on key collision, but returns an iterator to + * the existing element (since this could due to a race with + * another thread, it is often important to check this return + * value). + * + * Allocates new sub maps as the existing ones become full. If + * all sub maps are full, no element is inserted, and + * AtomicHashMapFullError is thrown. + */ + std::pair insert(const value_type& r) { + return emplace(r.first, r.second); + } + std::pair insert(key_type k, const mapped_type& v) { + return emplace(k, v); + } + std::pair insert(value_type&& r) { + return emplace(r.first, std::move(r.second)); + } + std::pair insert(key_type k, mapped_type&& v) { + return emplace(k, std::move(v)); + } + + /* + * emplace -- + * + * Same contract as insert(), but performs in-place construction + * of the value type using the specified arguments. + * + * Also, like find(), this method optionally allows 'key_in' to have a type + * different from that stored in the table; see find(). If and only if no + * equal key is already present, this method converts 'key_in' to a key of + * type KeyT using the provided LookupKeyToKeyFcn. + */ + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal, + typename LookupKeyToKeyFcn = key_convert, + typename... ArgTs> + std::pair emplace(LookupKeyT k, ArgTs&&... vCtorArg); + + /* + * find -- + * + * Returns the iterator to the element if found, otherwise end(). + * + * As an optional feature, the type of the key to look up (LookupKeyT) is + * allowed to be different from the type of keys actually stored (KeyT). + * + * This enables use cases where materializing the key is costly and usually + * redudant, e.g., canonicalizing/interning a set of strings and being able + * to look up by StringPiece. 
To use this feature, LookupHashFcn must take + * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first + * and second parameter, respectively. + * + * See folly/test/ArrayHashMapTest.cpp for sample usage. + */ + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal> + iterator find(LookupKeyT k); + + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal> + const_iterator find(LookupKeyT k) const; + + /* + * erase -- + * + * Erases key k from the map + * + * Returns 1 iff the key is found and erased, and 0 otherwise. + */ + size_type erase(key_type k); + + /* + * clear -- + * + * Wipes all keys and values from primary map and destroys all secondary + * maps. Primary map remains allocated and thus the memory can be reused + * in place. Not thread safe. + * + */ + void clear(); + + /* + * size -- + * + * Returns the exact size of the map. Note this is not as cheap as typical + * size() implementations because, for each AtomicHashArray in this AHM, we + * need to grab a lock and accumulate the values from all the thread local + * counters. See folly/ThreadCachedInt.h for more details. + */ + size_t size() const; + + bool empty() const { + return size() == 0; + } + + size_type count(key_type k) const { + return find(k) == end() ? 0 : 1; + } + + /* + * findAt -- + * + * Returns an iterator into the map. + * + * idx should only be an unmodified value returned by calling getIndex() on + * a valid iterator returned by find() or insert(). If idx is invalid you + * have a bug and the process aborts. 
+ */ + iterator findAt(uint32_t idx) { + SimpleRetT ret = findAtInternal(idx); + DCHECK_LT(ret.i, numSubMaps()); + return iterator( + this, + ret.i, + subMaps_[ret.i].load(std::memory_order_relaxed)->makeIter(ret.j)); + } + const_iterator findAt(uint32_t idx) const { + return const_cast(this)->findAt(idx); + } + + // Total capacity - summation of capacities of all submaps. + size_t capacity() const; + + // Number of new insertions until current submaps are all at max load factor. + size_t spaceRemaining() const; + + void setEntryCountThreadCacheSize(int32_t newSize) { + const int numMaps = numMapsAllocated_.load(std::memory_order_acquire); + for (int i = 0; i < numMaps; ++i) { + SubMap* map = subMaps_[i].load(std::memory_order_relaxed); + map->setEntryCountThreadCacheSize(newSize); + } + } + + // Number of sub maps allocated so far to implement this map. The more there + // are, the worse the performance. + int numSubMaps() const { + return numMapsAllocated_.load(std::memory_order_acquire); + } + + iterator begin() { + iterator it(this, 0, subMaps_[0].load(std::memory_order_relaxed)->begin()); + it.checkAdvanceToNextSubmap(); + return it; + } + + const_iterator begin() const { + const_iterator it( + this, 0, subMaps_[0].load(std::memory_order_relaxed)->begin()); + it.checkAdvanceToNextSubmap(); + return it; + } + + iterator end() { + return iterator(); + } + + const_iterator end() const { + return const_iterator(); + } + + /* Advanced functions for direct access: */ + + inline uint32_t recToIdx(const value_type& r, bool mayInsert = true) { + SimpleRetT ret = + mayInsert ? insertInternal(r.first, r.second) : findInternal(r.first); + return encodeIndex(ret.i, ret.j); + } + + inline uint32_t recToIdx(value_type&& r, bool mayInsert = true) { + SimpleRetT ret = mayInsert ? 
insertInternal(r.first, std::move(r.second)) + : findInternal(r.first); + return encodeIndex(ret.i, ret.j); + } + + inline uint32_t + recToIdx(key_type k, const mapped_type& v, bool mayInsert = true) { + SimpleRetT ret = mayInsert ? insertInternal(k, v) : findInternal(k); + return encodeIndex(ret.i, ret.j); + } + + inline uint32_t recToIdx(key_type k, mapped_type&& v, bool mayInsert = true) { + SimpleRetT ret = + mayInsert ? insertInternal(k, std::move(v)) : findInternal(k); + return encodeIndex(ret.i, ret.j); + } + + inline uint32_t keyToIdx(const KeyT k, bool mayInsert = false) { + return recToIdx(value_type(k), mayInsert); + } + + inline const value_type& idxToRec(uint32_t idx) const { + SimpleRetT ret = findAtInternal(idx); + return subMaps_[ret.i].load(std::memory_order_relaxed)->idxToRec(ret.j); + } + + /* Private data and helper functions... */ + + private: + // This limits primary submap size to 2^31 ~= 2 billion, secondary submap + // size to 2^(32 - kNumSubMapBits_ - 1) = 2^27 ~= 130 million, and num subMaps + // to 2^kNumSubMapBits_ = 16. + static const uint32_t kNumSubMapBits_ = 4; + static const uint32_t kSecondaryMapBit_ = 1u << 31; // Highest bit + static const uint32_t kSubMapIndexShift_ = 32 - kNumSubMapBits_ - 1; + static const uint32_t kSubMapIndexMask_ = (1 << kSubMapIndexShift_) - 1; + static const uint32_t kNumSubMaps_ = 1 << kNumSubMapBits_; + static const uintptr_t kLockedPtr_ = 0x88ULL << 48; // invalid pointer + + struct SimpleRetT { + uint32_t i; + size_t j; + bool success; + SimpleRetT(uint32_t ii, size_t jj, bool s) : i(ii), j(jj), success(s) {} + SimpleRetT() = default; + }; + + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal, + typename LookupKeyToKeyFcn = key_convert, + typename... ArgTs> + SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... 
value); + + template < + typename LookupKeyT = key_type, + typename LookupHashFcn = hasher, + typename LookupEqualFcn = key_equal> + SimpleRetT findInternal(const LookupKeyT k) const; + + SimpleRetT findAtInternal(uint32_t idx) const; + + std::atomic subMaps_[kNumSubMaps_]; + std::atomic numMapsAllocated_; + + inline bool tryLockMap(unsigned int idx) { + SubMap* val = nullptr; + return subMaps_[idx].compare_exchange_strong( + val, (SubMap*)kLockedPtr_, std::memory_order_acquire); + } + + static inline uint32_t encodeIndex(uint32_t subMap, uint32_t subMapIdx); + +}; // AtomicHashMap + +template < + class KeyT, + class ValueT, + class HashFcn = std::hash, + class EqualFcn = std::equal_to, + class Allocator = std::allocator> +using QuadraticProbingAtomicHashMap = AtomicHashMap< + KeyT, + ValueT, + HashFcn, + EqualFcn, + Allocator, + AtomicHashArrayQuadraticProbeFcn>; +} // namespace folly + +#include diff --git a/native/iosTest/Pods/Folly/folly/AtomicIntrusiveLinkedList.h b/native/iosTest/Pods/Folly/folly/AtomicIntrusiveLinkedList.h new file mode 100644 index 000000000..aa2a866e0 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicIntrusiveLinkedList.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +namespace folly { + +/** + * A very simple atomic single-linked list primitive. 
+ * + * Usage: + * + * class MyClass { + * AtomicIntrusiveLinkedListHook hook_; + * } + * + * AtomicIntrusiveLinkedList list; + * list.insert(&a); + * list.sweep([] (MyClass* c) { doSomething(c); } + */ +template +struct AtomicIntrusiveLinkedListHook { + T* next{nullptr}; +}; + +template T::*HookMember> +class AtomicIntrusiveLinkedList { + public: + AtomicIntrusiveLinkedList() {} + AtomicIntrusiveLinkedList(const AtomicIntrusiveLinkedList&) = delete; + AtomicIntrusiveLinkedList& operator=(const AtomicIntrusiveLinkedList&) = + delete; + AtomicIntrusiveLinkedList(AtomicIntrusiveLinkedList&& other) noexcept { + auto tmp = other.head_.load(); + other.head_ = head_.load(); + head_ = tmp; + } + AtomicIntrusiveLinkedList& operator=( + AtomicIntrusiveLinkedList&& other) noexcept { + auto tmp = other.head_.load(); + other.head_ = head_.load(); + head_ = tmp; + + return *this; + } + + /** + * Note: list must be empty on destruction. + */ + ~AtomicIntrusiveLinkedList() { + assert(empty()); + } + + bool empty() const { + return head_.load() == nullptr; + } + + /** + * Atomically insert t at the head of the list. + * @return True if the inserted element is the only one in the list + * after the call. + */ + bool insertHead(T* t) { + assert(next(t) == nullptr); + + auto oldHead = head_.load(std::memory_order_relaxed); + do { + next(t) = oldHead; + /* oldHead is updated by the call below. + + NOTE: we don't use next(t) instead of oldHead directly due to + compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899), + MSVC (bug 819819); source: + http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */ + } while (!head_.compare_exchange_weak( + oldHead, t, std::memory_order_release, std::memory_order_relaxed)); + + return oldHead == nullptr; + } + + /** + * Replaces the head with nullptr, + * and calls func() on the removed elements in the order from tail to head. + * Returns false if the list was empty. 
+ */ + template + bool sweepOnce(F&& func) { + if (auto head = head_.exchange(nullptr)) { + auto rhead = reverse(head); + unlinkAll(rhead, std::forward(func)); + return true; + } + return false; + } + + /** + * Repeatedly replaces the head with nullptr, + * and calls func() on the removed elements in the order from tail to head. + * Stops when the list is empty. + */ + template + void sweep(F&& func) { + while (sweepOnce(func)) { + } + } + + /** + * Similar to sweep() but calls func() on elements in LIFO order. + * + * func() is called for all elements in the list at the moment + * reverseSweep() is called. Unlike sweep() it does not loop to ensure the + * list is empty at some point after the last invocation. This way callers + * can reason about the ordering: elements inserted since the last call to + * reverseSweep() will be provided in LIFO order. + * + * Example: if elements are inserted in the order 1-2-3, the callback is + * invoked 3-2-1. If the callback moves elements onto a stack, popping off + * the stack will produce the original insertion order 1-2-3. + */ + template + void reverseSweep(F&& func) { + // We don't loop like sweep() does because the overall order of callbacks + // would be strand-wise LIFO which is meaningless to callers. 
+ auto head = head_.exchange(nullptr); + unlinkAll(head, std::forward(func)); + } + + private: + std::atomic head_{nullptr}; + + static T*& next(T* t) { + return (t->*HookMember).next; + } + + /* Reverses a linked list, returning the pointer to the new head + (old tail) */ + static T* reverse(T* head) { + T* rhead = nullptr; + while (head != nullptr) { + auto t = head; + head = next(t); + next(t) = rhead; + rhead = t; + } + return rhead; + } + + /* Unlinks all elements in the linked list fragment pointed to by `head', + * calling func() on every element */ + template + void unlinkAll(T* head, F&& func) { + while (head != nullptr) { + auto t = head; + head = next(t); + next(t) = nullptr; + func(t); + } + } +}; + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/AtomicLinkedList.h b/native/iosTest/Pods/Folly/folly/AtomicLinkedList.h new file mode 100644 index 000000000..ecff27ab3 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicLinkedList.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include + +namespace folly { + +/** + * A very simple atomic single-linked list primitive. 
+ * + * Usage: + * + * AtomicLinkedList list; + * list.insert(a); + * list.sweep([] (MyClass& c) { doSomething(c); } + */ + +template +class AtomicLinkedList { + public: + AtomicLinkedList() {} + AtomicLinkedList(const AtomicLinkedList&) = delete; + AtomicLinkedList& operator=(const AtomicLinkedList&) = delete; + AtomicLinkedList(AtomicLinkedList&& other) noexcept = default; + AtomicLinkedList& operator=(AtomicLinkedList&& other) = default; + + ~AtomicLinkedList() { + sweep([](T&&) {}); + } + + bool empty() const { + return list_.empty(); + } + + /** + * Atomically insert t at the head of the list. + * @return True if the inserted element is the only one in the list + * after the call. + */ + bool insertHead(T t) { + auto wrapper = std::make_unique(std::move(t)); + + return list_.insertHead(wrapper.release()); + } + + /** + * Repeatedly pops element from head, + * and calls func() on the removed elements in the order from tail to head. + * Stops when the list is empty. + */ + template + void sweep(F&& func) { + list_.sweep([&](Wrapper* wrapperPtr) mutable { + std::unique_ptr wrapper(wrapperPtr); + + func(std::move(wrapper->data)); + }); + } + + /** + * Similar to sweep() but calls func() on elements in LIFO order. + * + * func() is called for all elements in the list at the moment + * reverseSweep() is called. Unlike sweep() it does not loop to ensure the + * list is empty at some point after the last invocation. This way callers + * can reason about the ordering: elements inserted since the last call to + * reverseSweep() will be provided in LIFO order. + * + * Example: if elements are inserted in the order 1-2-3, the callback is + * invoked 3-2-1. If the callback moves elements onto a stack, popping off + * the stack will produce the original insertion order 1-2-3. 
+ */ + template + void reverseSweep(F&& func) { + list_.reverseSweep([&](Wrapper* wrapperPtr) mutable { + std::unique_ptr wrapper(wrapperPtr); + + func(std::move(wrapper->data)); + }); + } + + private: + struct Wrapper { + explicit Wrapper(T&& t) : data(std::move(t)) {} + + AtomicIntrusiveLinkedListHook hook; + T data; + }; + AtomicIntrusiveLinkedList list_; +}; + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/AtomicUnorderedMap.h b/native/iosTest/Pods/Folly/folly/AtomicUnorderedMap.h new file mode 100644 index 000000000..f7e84d7af --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/AtomicUnorderedMap.h @@ -0,0 +1,513 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace folly { + +/// You're probably reading this because you are looking for an +/// AtomicUnorderedMap that is fully general, highly concurrent (for +/// reads, writes, and iteration), and makes no performance compromises. +/// We haven't figured that one out yet. What you will find here is a +/// hash table implementation that sacrifices generality so that it can +/// give you all of the other things. 
+/// +/// LIMITATIONS: +/// +/// * Insert only (*) - the only write operation supported directly by +/// AtomicUnorderedInsertMap is findOrConstruct. There is a (*) because +/// values aren't moved, so you can roll your own concurrency control for +/// in-place updates of values (see MutableData and MutableAtom below), +/// but the hash table itself doesn't help you. +/// +/// * No resizing - you must specify the capacity up front, and once +/// the hash map gets full you won't be able to insert. Insert +/// performance will degrade once the load factor is high. Insert is +/// O(1/(1-actual_load_factor)). Note that this is a pretty strong +/// limitation, because you can't remove existing keys. +/// +/// * 2^30 maximum default capacity - by default AtomicUnorderedInsertMap +/// uses uint32_t internal indexes (and steals 2 bits), limiting you +/// to about a billion entries. If you need more you can fill in all +/// of the template params so you change IndexType to uint64_t, or you +/// can use AtomicUnorderedInsertMap64. 64-bit indexes will increase +/// the space over of the map, of course. +/// +/// WHAT YOU GET IN EXCHANGE: +/// +/// * Arbitrary key and value types - any K and V that can be used in a +/// std::unordered_map can be used here. In fact, the key and value +/// types don't even have to be copyable or moveable! +/// +/// * Keys and values in the map won't be moved - it is safe to keep +/// pointers or references to the keys and values in the map, because +/// they are never moved or destroyed (until the map itself is destroyed). +/// +/// * Iterators are never invalidated - writes don't invalidate iterators, +/// so you can scan and insert in parallel. +/// +/// * Fast wait-free reads - reads are usually only a single cache miss, +/// even when the hash table is very large. Wait-freedom means that +/// you won't see latency outliers even in the face of concurrent writes. +/// +/// * Lock-free insert - writes proceed in parallel. 
If a thread in the +/// middle of a write is unlucky and gets suspended, it doesn't block +/// anybody else. +/// +/// COMMENTS ON INSERT-ONLY +/// +/// This map provides wait-free linearizable reads and lock-free +/// linearizable inserts. Inserted values won't be moved, but no +/// concurrency control is provided for safely updating them. To remind +/// you of that fact they are only provided in const form. This is the +/// only simple safe thing to do while preserving something like the normal +/// std::map iteration form, which requires that iteration be exposed +/// via std::pair (and prevents encapsulation of access to the value). +/// +/// There are a couple of reasonable policies for doing in-place +/// concurrency control on the values. I am hoping that the policy can +/// be injected via the value type or an extra template param, to keep +/// the core AtomicUnorderedInsertMap insert-only: +/// +/// CONST: this is the currently implemented strategy, which is simple, +/// performant, and not that expressive. You can always put in a value +/// with a mutable field (see MutableAtom below), but that doesn't look +/// as pretty as it should. +/// +/// ATOMIC: for integers and integer-size trivially copyable structs +/// (via an adapter like tao/queues/AtomicStruct) the value can be a +/// std::atomic and read and written atomically. +/// +/// SEQ-LOCK: attach a counter incremented before and after write. +/// Writers serialize by using CAS to make an even->odd transition, +/// then odd->even after the write. Readers grab the value with memcpy, +/// checking sequence value before and after. Readers retry until they +/// see an even sequence number that doesn't change. This works for +/// larger structs, but still requires memcpy to be equivalent to copy +/// assignment, and it is no longer lock-free. It scales very well, +/// because the readers are still invisible (no cache line writes). +/// +/// LOCK: folly's SharedMutex would be a good choice here. 
+/// +/// MEMORY ALLOCATION +/// +/// Underlying memory is allocated as a big anonymous mmap chunk, which +/// might be cheaper than calloc() and is certainly not more expensive +/// for large maps. If the SkipKeyValueDeletion template param is true +/// then deletion of the map consists of unmapping the backing memory, +/// which is much faster than destructing all of the keys and values. +/// Feel free to override if std::is_trivial_destructor isn't recognizing +/// the triviality of your destructors. +template < + typename Key, + typename Value, + typename Hash = std::hash, + typename KeyEqual = std::equal_to, + bool SkipKeyValueDeletion = + (std::is_trivially_destructible::value && + std::is_trivially_destructible::value), + template class Atom = std::atomic, + typename IndexType = uint32_t, + typename Allocator = folly::detail::MMapAlloc> + +struct AtomicUnorderedInsertMap { + typedef Key key_type; + typedef Value mapped_type; + typedef std::pair value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef Hash hasher; + typedef KeyEqual key_equal; + typedef const value_type& const_reference; + + typedef struct ConstIterator { + ConstIterator(const AtomicUnorderedInsertMap& owner, IndexType slot) + : owner_(owner), slot_(slot) {} + + ConstIterator(const ConstIterator&) = default; + ConstIterator& operator=(const ConstIterator&) = default; + + const value_type& operator*() const { + return owner_.slots_[slot_].keyValue(); + } + + const value_type* operator->() const { + return &owner_.slots_[slot_].keyValue(); + } + + // pre-increment + const ConstIterator& operator++() { + while (slot_ > 0) { + --slot_; + if (owner_.slots_[slot_].state() == LINKED) { + break; + } + } + return *this; + } + + // post-increment + ConstIterator operator++(int /* dummy */) { + auto prev = *this; + ++*this; + return prev; + } + + bool operator==(const ConstIterator& rhs) const { + return slot_ == rhs.slot_; + } + bool operator!=(const 
ConstIterator& rhs) const { + return !(*this == rhs); + } + + private: + const AtomicUnorderedInsertMap& owner_; + IndexType slot_; + } const_iterator; + + friend ConstIterator; + + /// Constructs a map that will support the insertion of maxSize key-value + /// pairs without exceeding the max load factor. Load factors of greater + /// than 1 are not supported, and once the actual load factor of the + /// map approaches 1 the insert performance will suffer. The capacity + /// is limited to 2^30 (about a billion) for the default IndexType, + /// beyond which we will throw invalid_argument. + explicit AtomicUnorderedInsertMap( + size_t maxSize, + float maxLoadFactor = 0.8f, + const Allocator& alloc = Allocator()) + : allocator_(alloc) { + size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128); + size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2); + if (capacity > avail && maxSize < avail) { + // we'll do our best + capacity = avail; + } + if (capacity < maxSize || capacity > avail) { + throw std::invalid_argument( + "AtomicUnorderedInsertMap capacity must fit in IndexType with 2 bits " + "left over"); + } + + numSlots_ = capacity; + slotMask_ = folly::nextPowTwo(capacity * 4) - 1; + mmapRequested_ = sizeof(Slot) * capacity; + slots_ = reinterpret_cast(allocator_.allocate(mmapRequested_)); + zeroFillSlots(); + // mark the zero-th slot as in-use but not valid, since that happens + // to be our nil value + slots_[0].stateUpdate(EMPTY, CONSTRUCTING); + } + + ~AtomicUnorderedInsertMap() { + if (!SkipKeyValueDeletion) { + for (size_t i = 1; i < numSlots_; ++i) { + slots_[i].~Slot(); + } + } + allocator_.deallocate(reinterpret_cast(slots_), mmapRequested_); + } + + /// Searches for the key, returning (iter,false) if it is found. + /// If it is not found calls the functor Func with a void* argument + /// that is raw storage suitable for placement construction of a Value + /// (see raw_value_type), then returns (iter,true). 
May call Func and + /// then return (iter,false) if there are other concurrent writes, in + /// which case the newly constructed value will be immediately destroyed. + /// + /// This function does not block other readers or writers. If there + /// are other concurrent writes, many parallel calls to func may happen + /// and only the first one to complete will win. The values constructed + /// by the other calls to func will be destroyed. + /// + /// Usage: + /// + /// AtomicUnorderedInsertMap memo; + /// + /// auto value = memo.findOrConstruct(key, [=](void* raw) { + /// new (raw) std::string(computation(key)); + /// })->first; + template + std::pair findOrConstruct(const Key& key, Func&& func) { + auto const slot = keyToSlotIdx(key); + auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire); + + auto existing = find(key, slot); + if (existing != 0) { + return std::make_pair(ConstIterator(*this, existing), false); + } + + auto idx = allocateNear(slot); + new (&slots_[idx].keyValue().first) Key(key); + func(static_cast(&slots_[idx].keyValue().second)); + + while (true) { + slots_[idx].next_ = prev >> 2; + + // we can merge the head update and the CONSTRUCTING -> LINKED update + // into a single CAS if slot == idx (which should happen often) + auto after = idx << 2; + if (slot == idx) { + after += LINKED; + } else { + after += (prev & 3); + } + + if (slots_[slot].headAndState_.compare_exchange_strong(prev, after)) { + // success + if (idx != slot) { + slots_[idx].stateUpdate(CONSTRUCTING, LINKED); + } + return std::make_pair(ConstIterator(*this, idx), true); + } + // compare_exchange_strong updates its first arg on failure, so + // there is no need to reread prev + + existing = find(key, slot); + if (existing != 0) { + // our allocated key and value are no longer needed + slots_[idx].keyValue().first.~Key(); + slots_[idx].keyValue().second.~Value(); + slots_[idx].stateUpdate(CONSTRUCTING, EMPTY); + + return std::make_pair(ConstIterator(*this, 
existing), false); + } + } + } + + /// This isn't really emplace, but it is what we need to test. + /// Eventually we can duplicate all of the std::pair constructor + /// forms, including a recursive tuple forwarding template + /// http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/). + template + std::pair emplace(const K& key, V&& value) { + return findOrConstruct( + key, [&](void* raw) { new (raw) Value(std::forward(value)); }); + } + + const_iterator find(const Key& key) const { + return ConstIterator(*this, find(key, keyToSlotIdx(key))); + } + + const_iterator cbegin() const { + IndexType slot = numSlots_ - 1; + while (slot > 0 && slots_[slot].state() != LINKED) { + --slot; + } + return ConstIterator(*this, slot); + } + + const_iterator cend() const { + return ConstIterator(*this, 0); + } + + private: + enum : IndexType { + kMaxAllocationTries = 1000, // after this we throw + }; + + enum BucketState : IndexType { + EMPTY = 0, + CONSTRUCTING = 1, + LINKED = 2, + }; + + /// Lock-free insertion is easiest by prepending to collision chains. + /// A large chaining hash table takes two cache misses instead of + /// one, however. Our solution is to colocate the bucket storage and + /// the head storage, so that even though we are traversing chains we + /// are likely to stay within the same cache line. Just make sure to + /// traverse head before looking at any keys. This strategy gives us + /// 32 bit pointers and fast iteration. + struct Slot { + /// The bottom two bits are the BucketState, the rest is the index + /// of the first bucket for the chain whose keys map to this slot. + /// When things are going well the head usually links to this slot, + /// but that doesn't always have to happen. 
+ Atom headAndState_; + + /// The next bucket in the chain + IndexType next_; + + /// Key and Value + aligned_storage_for_t raw_; + + ~Slot() { + auto s = state(); + assert(s == EMPTY || s == LINKED); + if (s == LINKED) { + keyValue().first.~Key(); + keyValue().second.~Value(); + } + } + + BucketState state() const { + return BucketState(headAndState_.load(std::memory_order_acquire) & 3); + } + + void stateUpdate(BucketState before, BucketState after) { + assert(state() == before); + headAndState_ += (after - before); + } + + value_type& keyValue() { + assert(state() != EMPTY); + return *static_cast(static_cast(&raw_)); + } + + const value_type& keyValue() const { + assert(state() != EMPTY); + return *static_cast(static_cast(&raw_)); + } + }; + + // We manually manage the slot memory so we can bypass initialization + // (by getting a zero-filled mmap chunk) and optionally destruction of + // the slots + + size_t mmapRequested_; + size_t numSlots_; + + /// tricky, see keyToSlodIdx + size_t slotMask_; + + Allocator allocator_; + Slot* slots_; + + IndexType keyToSlotIdx(const Key& key) const { + size_t h = hasher()(key); + h &= slotMask_; + while (h >= numSlots_) { + h -= numSlots_; + } + return h; + } + + IndexType find(const Key& key, IndexType slot) const { + KeyEqual ke = {}; + auto hs = slots_[slot].headAndState_.load(std::memory_order_acquire); + for (slot = hs >> 2; slot != 0; slot = slots_[slot].next_) { + if (ke(key, slots_[slot].keyValue().first)) { + return slot; + } + } + return 0; + } + + /// Allocates a slot and returns its index. Tries to put it near + /// slots_[start]. 
+ IndexType allocateNear(IndexType start) { + for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) { + auto slot = allocationAttempt(start, tries); + auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire); + if ((prev & 3) == EMPTY && + slots_[slot].headAndState_.compare_exchange_strong( + prev, prev + CONSTRUCTING - EMPTY)) { + return slot; + } + } + throw std::bad_alloc(); + } + + /// Returns the slot we should attempt to allocate after tries failed + /// tries, starting from the specified slot. This is pulled out so we + /// can specialize it differently during deterministic testing + IndexType allocationAttempt(IndexType start, IndexType tries) const { + if (LIKELY(tries < 8 && start + tries < numSlots_)) { + return IndexType(start + tries); + } else { + IndexType rv; + if (sizeof(IndexType) <= 4) { + rv = IndexType(folly::Random::rand32(numSlots_)); + } else { + rv = IndexType(folly::Random::rand64(numSlots_)); + } + assert(rv < numSlots_); + return rv; + } + } + + void zeroFillSlots() { + using folly::detail::GivesZeroFilledMemory; + if (!GivesZeroFilledMemory::value) { + memset(static_cast(slots_), 0, mmapRequested_); + } + } +}; + +/// AtomicUnorderedInsertMap64 is just a type alias that makes it easier +/// to select a 64 bit slot index type. Use this if you need a capacity +/// bigger than 2^30 (about a billion). This increases memory overheads, +/// obviously. 
+template < + typename Key, + typename Value, + typename Hash = std::hash, + typename KeyEqual = std::equal_to, + bool SkipKeyValueDeletion = + (std::is_trivially_destructible::value && + std::is_trivially_destructible::value), + template class Atom = std::atomic, + typename Allocator = folly::detail::MMapAlloc> +using AtomicUnorderedInsertMap64 = AtomicUnorderedInsertMap< + Key, + Value, + Hash, + KeyEqual, + SkipKeyValueDeletion, + Atom, + uint64_t, + Allocator>; + +/// MutableAtom is a tiny wrapper than gives you the option of atomically +/// updating values inserted into an AtomicUnorderedInsertMap>. This relies on AtomicUnorderedInsertMap's guarantee +/// that it doesn't move values. +template class Atom = std::atomic> +struct MutableAtom { + mutable Atom data; + + explicit MutableAtom(const T& init) : data(init) {} +}; + +/// MutableData is a tiny wrapper than gives you the option of using an +/// external concurrency control mechanism to updating values inserted +/// into an AtomicUnorderedInsertMap. +template +struct MutableData { + mutable T data; + explicit MutableData(const T& init) : data(init) {} +}; + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/Benchmark.h b/native/iosTest/Pods/Folly/folly/Benchmark.h new file mode 100644 index 000000000..f4d8fc7b0 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Benchmark.h @@ -0,0 +1,683 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include // for FB_ANONYMOUS_VARIABLE +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +DECLARE_bool(benchmark); + +namespace folly { + +/** + * Runs all benchmarks defined. Usually put in main(). + */ +void runBenchmarks(); + +/** + * Runs all benchmarks defined if and only if the --benchmark flag has + * been passed to the program. Usually put in main(). + */ +inline bool runBenchmarksOnFlag() { + if (FLAGS_benchmark) { + runBenchmarks(); + } + return FLAGS_benchmark; +} + +class UserMetric { + public: + enum class Type { CUSTOM, TIME, METRIC }; + + int64_t value{}; + Type type{Type::CUSTOM}; + + UserMetric() = default; + /* implicit */ UserMetric(int64_t val, Type typ = Type::CUSTOM) + : value(val), type(typ) {} +}; + +using UserCounters = std::unordered_map; + +namespace detail { +struct TimeIterData { + std::chrono::high_resolution_clock::duration duration; + unsigned int niter; + UserCounters userCounters; +}; + +using BenchmarkFun = std::function; + +struct BenchmarkRegistration { + std::string file; + std::string name; + BenchmarkFun func; + bool useCounter = false; +}; + +struct BenchmarkResult { + std::string file; + std::string name; + double timeInNs; + UserCounters counters; +}; + +/** + * Adds a benchmark wrapped in a std::function. Only used + * internally. Pass by value is intentional. + */ +void addBenchmarkImpl( + const char* file, + const char* name, + BenchmarkFun, + bool useCounter); + +} // namespace detail + +/** + * Supporting type for BENCHMARK_SUSPEND defined below. 
+ */ +struct BenchmarkSuspender { + using Clock = std::chrono::high_resolution_clock; + using TimePoint = Clock::time_point; + using Duration = Clock::duration; + + BenchmarkSuspender() { + start = Clock::now(); + } + + BenchmarkSuspender(const BenchmarkSuspender&) = delete; + BenchmarkSuspender(BenchmarkSuspender&& rhs) noexcept { + start = rhs.start; + rhs.start = {}; + } + + BenchmarkSuspender& operator=(const BenchmarkSuspender&) = delete; + BenchmarkSuspender& operator=(BenchmarkSuspender&& rhs) { + if (start != TimePoint{}) { + tally(); + } + start = rhs.start; + rhs.start = {}; + return *this; + } + + ~BenchmarkSuspender() { + if (start != TimePoint{}) { + tally(); + } + } + + void dismiss() { + assert(start != TimePoint{}); + tally(); + start = {}; + } + + void rehire() { + assert(start == TimePoint{}); + start = Clock::now(); + } + + template + auto dismissing(F f) -> invoke_result_t { + SCOPE_EXIT { + rehire(); + }; + dismiss(); + return f(); + } + + /** + * This is for use inside of if-conditions, used in BENCHMARK macros. + * If-conditions bypass the explicit on operator bool. + */ + explicit operator bool() const { + return false; + } + + /** + * Accumulates time spent outside benchmark. + */ + static Duration timeSpent; + + private: + void tally() { + auto end = Clock::now(); + timeSpent += end - start; + start = end; + } + + TimePoint start; +}; + +/** + * Adds a benchmark. Usually not called directly but instead through + * the macro BENCHMARK defined below. The lambda function involved + * must take exactly one parameter of type unsigned, and the benchmark + * uses it with counter semantics (iteration occurs inside the + * function). 
+ */ +template +typename std::enable_if::value>::type +addBenchmark(const char* file, const char* name, Lambda&& lambda) { + auto execute = [=](unsigned int times) { + BenchmarkSuspender::timeSpent = {}; + unsigned int niter; + + // CORE MEASUREMENT STARTS + auto start = std::chrono::high_resolution_clock::now(); + niter = lambda(times); + auto end = std::chrono::high_resolution_clock::now(); + // CORE MEASUREMENT ENDS + return detail::TimeIterData{ + (end - start) - BenchmarkSuspender::timeSpent, niter, UserCounters{}}; + }; + + detail::addBenchmarkImpl(file, name, detail::BenchmarkFun(execute), false); +} + +/** + * Adds a benchmark. Usually not called directly but instead through + * the macro BENCHMARK defined below. The lambda function involved + * must take zero parameters, and the benchmark calls it repeatedly + * (iteration occurs outside the function). + */ +template +typename std::enable_if::value>::type +addBenchmark(const char* file, const char* name, Lambda&& lambda) { + addBenchmark(file, name, [=](unsigned int times) { + unsigned int niter = 0; + while (times-- > 0) { + niter += lambda(); + } + return niter; + }); +} + +/** + * similar as previous two template specialization, but lambda will also take + * customized counters in the following two cases + */ +template +typename std::enable_if< + folly::is_invocable::value>::type +addBenchmark(const char* file, const char* name, Lambda&& lambda) { + auto execute = [=](unsigned int times) { + BenchmarkSuspender::timeSpent = {}; + unsigned int niter; + + // CORE MEASUREMENT STARTS + auto start = std::chrono::high_resolution_clock::now(); + UserCounters counters; + niter = lambda(counters, times); + auto end = std::chrono::high_resolution_clock::now(); + // CORE MEASUREMENT ENDS + return detail::TimeIterData{ + (end - start) - BenchmarkSuspender::timeSpent, niter, counters}; + }; + + detail::addBenchmarkImpl( + file, + name, + std::function(execute), + true); +} + +template +typename 
std::enable_if::value>::type +addBenchmark(const char* file, const char* name, Lambda&& lambda) { + addBenchmark(file, name, [=](UserCounters& counters, unsigned int times) { + unsigned int niter = 0; + while (times-- > 0) { + niter += lambda(counters); + } + return niter; + }); +} + +/** + * Call doNotOptimizeAway(var) to ensure that var will be computed even + * post-optimization. Use it for variables that are computed during + * benchmarking but otherwise are useless. The compiler tends to do a + * good job at eliminating unused variables, and this function fools it + * into thinking var is in fact needed. + * + * Call makeUnpredictable(var) when you don't want the optimizer to use + * its knowledge of var to shape the following code. This is useful + * when constant propagation or power reduction is possible during your + * benchmark but not in real use cases. + */ + +#ifdef _MSC_VER + +#pragma optimize("", off) + +inline void doNotOptimizeDependencySink(const void*) {} + +#pragma optimize("", on) + +template +void doNotOptimizeAway(const T& datum) { + doNotOptimizeDependencySink(&datum); +} + +template +void makeUnpredictable(T& datum) { + doNotOptimizeDependencySink(&datum); +} + +#else + +namespace detail { +template +struct DoNotOptimizeAwayNeedsIndirect { + using Decayed = typename std::decay::type; + + // First two constraints ensure it can be an "r" operand. + // std::is_pointer check is because callers seem to expect that + // doNotOptimizeAway(&x) is equivalent to doNotOptimizeAway(x). + constexpr static bool value = !folly::is_trivially_copyable::value || + sizeof(Decayed) > sizeof(long) || std::is_pointer::value; +}; +} // namespace detail + +template +auto doNotOptimizeAway(const T& datum) -> typename std::enable_if< + !detail::DoNotOptimizeAwayNeedsIndirect::value>::type { + // The "r" constraint forces the compiler to make datum available + // in a register to the asm block, which means that it must have + // computed/loaded it. 
We use this path for things that are <= + // sizeof(long) (they have to fit), trivial (otherwise the compiler + // doesn't want to put them in a register), and not a pointer (because + // doNotOptimizeAway(&foo) would otherwise be a foot gun that didn't + // necessarily compute foo). + // + // An earlier version of this method had a more permissive input operand + // constraint, but that caused unnecessary variation between clang and + // gcc benchmarks. + asm volatile("" ::"r"(datum)); +} + +template +auto doNotOptimizeAway(const T& datum) -> typename std::enable_if< + detail::DoNotOptimizeAwayNeedsIndirect::value>::type { + // This version of doNotOptimizeAway tells the compiler that the asm + // block will read datum from memory, and that in addition it might read + // or write from any memory location. If the memory clobber could be + // separated into input and output that would be preferrable. + asm volatile("" ::"m"(datum) : "memory"); +} + +template +auto makeUnpredictable(T& datum) -> typename std::enable_if< + !detail::DoNotOptimizeAwayNeedsIndirect::value>::type { + asm volatile("" : "+r"(datum)); +} + +template +auto makeUnpredictable(T& datum) -> typename std::enable_if< + detail::DoNotOptimizeAwayNeedsIndirect::value>::type { + asm volatile("" ::"m"(datum) : "memory"); +} + +#endif + +struct dynamic; + +void benchmarkResultsToDynamic( + const std::vector& data, + dynamic&); + +void benchmarkResultsFromDynamic( + const dynamic&, + std::vector&); + +void printResultComparison( + const std::vector& base, + const std::vector& test); + +} // namespace folly + +/** + * Introduces a benchmark function. Used internally, see BENCHMARK and + * friends below. 
+ */ + +#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \ + static void funName(paramType); \ + FOLLY_MAYBE_UNUSED static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \ + (::folly::addBenchmark( \ + __FILE__, \ + stringName, \ + [](paramType paramName) -> unsigned { \ + funName(paramName); \ + return rv; \ + }), \ + true); \ + static void funName(paramType paramName) + +#define BENCHMARK_IMPL_COUNTERS( \ + funName, stringName, counters, rv, paramType, paramName) \ + static void funName( \ + ::folly::UserCounters& FOLLY_PP_DETAIL_APPEND_VA_ARG(paramType)); \ + FOLLY_MAYBE_UNUSED static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \ + (::folly::addBenchmark( \ + __FILE__, \ + stringName, \ + [](::folly::UserCounters& counters FOLLY_PP_DETAIL_APPEND_VA_ARG( \ + paramType paramName)) -> unsigned { \ + funName(counters FOLLY_PP_DETAIL_APPEND_VA_ARG(paramName)); \ + return rv; \ + }), \ + true); \ + static void funName(::folly::UserCounters& counters \ + FOLLY_PP_DETAIL_APPEND_VA_ARG(paramType paramName)) + +/** + * Introduces a benchmark function with support for returning the actual + * number of iterations. Used internally, see BENCHMARK_MULTI and friends + * below. + */ +#define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \ + static unsigned funName(paramType); \ + FOLLY_MAYBE_UNUSED static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \ + (::folly::addBenchmark( \ + __FILE__, \ + stringName, \ + [](paramType paramName) { return funName(paramName); }), \ + true); \ + static unsigned funName(paramType paramName) + +/** + * Introduces a benchmark function. Use with either one or two arguments. + * The first is the name of the benchmark. Use something descriptive, such + * as insertVectorBegin. The second argument may be missing, or could be a + * symbolic counter. The counter dictates how many internal iteration the + * benchmark does. 
Example: + * + * BENCHMARK(vectorPushBack) { + * vector v; + * v.push_back(42); + * } + * + * BENCHMARK(insertVectorBegin, iters) { + * vector v; + * FOR_EACH_RANGE (i, 0, iters) { + * v.insert(v.begin(), 42); + * } + * } + */ +#define BENCHMARK(name, ...) \ + BENCHMARK_IMPL( \ + name, \ + FOLLY_PP_STRINGIZE(name), \ + FB_ARG_2_OR_1(1, ##__VA_ARGS__), \ + FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \ + __VA_ARGS__) + +/** + * Allow users to record customized counter during benchmarking, + * there will be one extra column showing in the output result for each counter + * + * BENCHMARK_COUNTERS(insertVectorBegin, couters, iters) { + * vector v; + * FOR_EACH_RANGE (i, 0, iters) { + * v.insert(v.begin(), 42); + * } + * BENCHMARK_SUSPEND { + * counters["foo"] = 10; + * } + * } + */ +#define BENCHMARK_COUNTERS(name, counters, ...) \ + BENCHMARK_IMPL_COUNTERS( \ + name, \ + FOLLY_PP_STRINGIZE(name), \ + counters, \ + FB_ARG_2_OR_1(1, ##__VA_ARGS__), \ + FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \ + __VA_ARGS__) +/** + * Like BENCHMARK above, but allows the user to return the actual + * number of iterations executed in the function body. This can be + * useful if the benchmark function doesn't know upfront how many + * iterations it's going to run or if it runs through a certain + * number of test cases, e.g.: + * + * BENCHMARK_MULTI(benchmarkSomething) { + * std::vector testCases { 0, 1, 1, 2, 3, 5 }; + * for (int c : testCases) { + * doSomething(c); + * } + * return testCases.size(); + * } + */ +#define BENCHMARK_MULTI(name, ...) \ + BENCHMARK_MULTI_IMPL( \ + name, \ + FOLLY_PP_STRINGIZE(name), \ + FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \ + __VA_ARGS__) + +/** + * Defines a benchmark that passes a parameter to another one. This is + * common for benchmarks that need a "problem size" in addition to + * "number of iterations". 
Consider: + * + * void pushBack(uint32_t n, size_t initialSize) { + * vector v; + * BENCHMARK_SUSPEND { + * v.resize(initialSize); + * } + * FOR_EACH_RANGE (i, 0, n) { + * v.push_back(i); + * } + * } + * BENCHMARK_PARAM(pushBack, 0) + * BENCHMARK_PARAM(pushBack, 1000) + * BENCHMARK_PARAM(pushBack, 1000000) + * + * The benchmark above estimates the speed of push_back at different + * initial sizes of the vector. The framework will pass 0, 1000, and + * 1000000 for initialSize, and the iteration count for n. + */ +#define BENCHMARK_PARAM(name, param) BENCHMARK_NAMED_PARAM(name, param, param) + +/** + * Same as BENCHMARK_PARAM, but allows one to return the actual number of + * iterations that have been run. + */ +#define BENCHMARK_PARAM_MULTI(name, param) \ + BENCHMARK_NAMED_PARAM_MULTI(name, param, param) + +/* + * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each + * parameter, rather than using the parameter value. + * + * Useful when the parameter value is not a valid token for string pasting, + * of when you want to specify multiple parameter arguments. + * + * For example: + * + * void addValue(uint32_t n, int64_t bucketSize, int64_t min, int64_t max) { + * Histogram hist(bucketSize, min, max); + * int64_t num = min; + * FOR_EACH_RANGE (i, 0, n) { + * hist.addValue(num); + * ++num; + * if (num > max) { num = min; } + * } + * } + * + * BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100) + * BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000) + * BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000) + */ +#define BENCHMARK_NAMED_PARAM(name, param_name, ...) \ + BENCHMARK_IMPL( \ + FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \ + FOLLY_PP_STRINGIZE(name) "(" FOLLY_PP_STRINGIZE(param_name) ")", \ + iters, \ + unsigned, \ + iters) { \ + name(iters, ##__VA_ARGS__); \ + } + +/** + * Same as BENCHMARK_NAMED_PARAM, but allows one to return the actual number + * of iterations that have been run. 
+ */ +#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \ + BENCHMARK_MULTI_IMPL( \ + FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \ + FOLLY_PP_STRINGIZE(name) "(" FOLLY_PP_STRINGIZE(param_name) ")", \ + unsigned, \ + iters) { \ + return name(iters, ##__VA_ARGS__); \ + } + +/** + * Just like BENCHMARK, but prints the time relative to a + * baseline. The baseline is the most recent BENCHMARK() seen in + * the current scope. Example: + * + * // This is the baseline + * BENCHMARK(insertVectorBegin, n) { + * vector v; + * FOR_EACH_RANGE (i, 0, n) { + * v.insert(v.begin(), 42); + * } + * } + * + * BENCHMARK_RELATIVE(insertListBegin, n) { + * list s; + * FOR_EACH_RANGE (i, 0, n) { + * s.insert(s.begin(), 42); + * } + * } + * + * Any number of relative benchmark can be associated with a + * baseline. Another BENCHMARK() occurrence effectively establishes a + * new baseline. + */ +#define BENCHMARK_RELATIVE(name, ...) \ + BENCHMARK_IMPL( \ + name, \ + "%" FOLLY_PP_STRINGIZE(name), \ + FB_ARG_2_OR_1(1, ##__VA_ARGS__), \ + FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \ + __VA_ARGS__) + +#define BENCHMARK_COUNTERS_RELATIVE(name, counters, ...) \ + BENCHMARK_IMPL_COUNTERS( \ + name, \ + "%" FOLLY_PP_STRINGIZE(name), \ + counters, \ + FB_ARG_2_OR_1(1, ##__VA_ARGS__), \ + FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \ + __VA_ARGS__) +/** + * Same as BENCHMARK_RELATIVE, but allows one to return the actual number + * of iterations that have been run. + */ +#define BENCHMARK_RELATIVE_MULTI(name, ...) \ + BENCHMARK_MULTI_IMPL( \ + name, \ + "%" FOLLY_PP_STRINGIZE(name), \ + FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \ + __VA_ARGS__) + +/** + * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM. + */ +#define BENCHMARK_RELATIVE_PARAM(name, param) \ + BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param) + +/** + * Same as BENCHMARK_RELATIVE_PARAM, but allows one to return the actual + * number of iterations that have been run. 
+ */ +#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \ + BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param) + +/** + * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM. + */ +#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \ + BENCHMARK_IMPL( \ + FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \ + "%" FOLLY_PP_STRINGIZE(name) "(" FOLLY_PP_STRINGIZE(param_name) ")", \ + iters, \ + unsigned, \ + iters) { \ + name(iters, ##__VA_ARGS__); \ + } + +/** + * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows one to return the + * actual number of iterations that have been run. + */ +#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \ + BENCHMARK_MULTI_IMPL( \ + FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \ + "%" FOLLY_PP_STRINGIZE(name) "(" FOLLY_PP_STRINGIZE(param_name) ")", \ + unsigned, \ + iters) { \ + return name(iters, ##__VA_ARGS__); \ + } + +/** + * Draws a line of dashes. + */ +#define BENCHMARK_DRAW_LINE() \ + FOLLY_MAYBE_UNUSED static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \ + (::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \ + true) + +/** + * Allows execution of code that doesn't count torward the benchmark's + * time budget. Example: + * + * BENCHMARK_START_GROUP(insertVectorBegin, n) { + * vector v; + * BENCHMARK_SUSPEND { + * v.reserve(n); + * } + * FOR_EACH_RANGE (i, 0, n) { + * v.insert(v.begin(), 42); + * } + * } + */ +#define BENCHMARK_SUSPEND \ + if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \ + ::folly::BenchmarkSuspender()) { \ + } else diff --git a/native/iosTest/Pods/Folly/folly/Bits.h b/native/iosTest/Pods/Folly/folly/Bits.h new file mode 100644 index 000000000..1569d598c --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Bits.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include // @shim diff --git a/native/iosTest/Pods/Folly/folly/CPortability.h b/native/iosTest/Pods/Folly/folly/CPortability.h new file mode 100644 index 000000000..976daf069 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/CPortability.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +/* These definitions are in a separate file so that they + * may be included from C- as well as C++-based projects. */ + +#include + +/** + * Portable version check. 
+ */ +#ifndef __GNUC_PREREQ +#if defined __GNUC__ && defined __GNUC_MINOR__ +/* nolint */ +#define __GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +/* nolint */ +#define __GNUC_PREREQ(maj, min) 0 +#endif +#endif + +// portable version check for clang +#ifndef __CLANG_PREREQ +#if defined __clang__ && defined __clang_major__ && defined __clang_minor__ +/* nolint */ +#define __CLANG_PREREQ(maj, min) \ + ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min)) +#else +/* nolint */ +#define __CLANG_PREREQ(maj, min) 0 +#endif +#endif + +#if defined(__has_builtin) +#define FOLLY_HAS_BUILTIN(...) __has_builtin(__VA_ARGS__) +#else +#define FOLLY_HAS_BUILTIN(...) 0 +#endif + +#if defined(__has_feature) +#define FOLLY_HAS_FEATURE(...) __has_feature(__VA_ARGS__) +#else +#define FOLLY_HAS_FEATURE(...) 0 +#endif + +/* FOLLY_SANITIZE_ADDRESS is defined to 1 if the current compilation unit + * is being compiled with ASAN enabled. + * + * Beware when using this macro in a header file: this macro may change values + * across compilation units if some libraries are built with ASAN enabled + * and some built with ASAN disabled. For instance, this may occur, if folly + * itself was compiled without ASAN but a downstream project that uses folly is + * compiling with ASAN enabled. + * + * Use FOLLY_LIBRARY_SANITIZE_ADDRESS (defined in folly-config.h) to check if + * folly itself was compiled with ASAN enabled. + */ +#ifndef FOLLY_SANITIZE_ADDRESS +#if FOLLY_HAS_FEATURE(address_sanitizer) || __SANITIZE_ADDRESS__ +#define FOLLY_SANITIZE_ADDRESS 1 +#endif +#endif + +/* Define attribute wrapper for function attribute used to disable + * address sanitizer instrumentation. Unfortunately, this attribute + * has issues when inlining is used, so disable that as well. 
*/ +#ifdef FOLLY_SANITIZE_ADDRESS +#if defined(__clang__) +#if __has_attribute(__no_sanitize__) +#define FOLLY_DISABLE_ADDRESS_SANITIZER \ + __attribute__((__no_sanitize__("address"), __noinline__)) +#elif __has_attribute(__no_address_safety_analysis__) +#define FOLLY_DISABLE_ADDRESS_SANITIZER \ + __attribute__((__no_address_safety_analysis__, __noinline__)) +#elif __has_attribute(__no_sanitize_address__) +#define FOLLY_DISABLE_ADDRESS_SANITIZER \ + __attribute__((__no_sanitize_address__, __noinline__)) +#endif +#elif defined(__GNUC__) +#define FOLLY_DISABLE_ADDRESS_SANITIZER \ + __attribute__((__no_address_safety_analysis__, __noinline__)) +#endif +#endif +#ifndef FOLLY_DISABLE_ADDRESS_SANITIZER +#define FOLLY_DISABLE_ADDRESS_SANITIZER +#endif + +/* Define a convenience macro to test when thread sanitizer is being used + * across the different compilers (e.g. clang, gcc) */ +#ifndef FOLLY_SANITIZE_THREAD +#if FOLLY_HAS_FEATURE(thread_sanitizer) || __SANITIZE_THREAD__ +#define FOLLY_SANITIZE_THREAD 1 +#endif +#endif + +#if FOLLY_SANITIZE_THREAD +#define FOLLY_DISABLE_THREAD_SANITIZER \ + __attribute__((no_sanitize_thread, noinline)) +#else +#define FOLLY_DISABLE_THREAD_SANITIZER +#endif + +/** + * Define a convenience macro to test when memory sanitizer is being used + * across the different compilers (e.g. 
clang, gcc) + */ +#ifndef FOLLY_SANITIZE_MEMORY +#if FOLLY_HAS_FEATURE(memory_sanitizer) || __SANITIZE_MEMORY__ +#define FOLLY_SANITIZE_MEMORY 1 +#endif +#endif + +#if FOLLY_SANITIZE_MEMORY +#define FOLLY_DISABLE_MEMORY_SANITIZER \ + __attribute__((no_sanitize_memory, noinline)) +#else +#define FOLLY_DISABLE_MEMORY_SANITIZER +#endif + +/** + * Define a convenience macro to test when ASAN, UBSAN, TSAN or MSAN sanitizer + * are being used + */ +#ifndef FOLLY_SANITIZE +#if defined(FOLLY_SANITIZE_ADDRESS) || defined(FOLLY_SANITIZE_THREAD) || \ + defined(FOLLY_SANITIZE_MEMORY) +#define FOLLY_SANITIZE 1 +#endif +#endif + +#if FOLLY_SANITIZE +#define FOLLY_DISABLE_UNDEFINED_BEHAVIOR_SANITIZER(...) \ + __attribute__((no_sanitize(__VA_ARGS__))) +#else +#define FOLLY_DISABLE_UNDEFINED_BEHAVIOR_SANITIZER(...) +#endif // FOLLY_SANITIZE + +#define FOLLY_DISABLE_SANITIZERS \ + FOLLY_DISABLE_ADDRESS_SANITIZER FOLLY_DISABLE_THREAD_SANITIZER \ + FOLLY_DISABLE_UNDEFINED_BEHAVIOR_SANITIZER("undefined") + +/** + * Macro for marking functions as having public visibility. 
+ */ +#if defined(__GNUC__) +#define FOLLY_EXPORT __attribute__((__visibility__("default"))) +#else +#define FOLLY_EXPORT +#endif + +// noinline +#ifdef _MSC_VER +#define FOLLY_NOINLINE __declspec(noinline) +#elif defined(__GNUC__) +#define FOLLY_NOINLINE __attribute__((__noinline__)) +#else +#define FOLLY_NOINLINE +#endif + +// always inline +#ifdef _MSC_VER +#define FOLLY_ALWAYS_INLINE __forceinline +#elif defined(__GNUC__) +#define FOLLY_ALWAYS_INLINE inline __attribute__((__always_inline__)) +#else +#define FOLLY_ALWAYS_INLINE inline +#endif + +// attribute hidden +#if defined(_MSC_VER) +#define FOLLY_ATTR_VISIBILITY_HIDDEN +#elif defined(__GNUC__) +#define FOLLY_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden"))) +#else +#define FOLLY_ATTR_VISIBILITY_HIDDEN +#endif + +// An attribute for marking symbols as weak, if supported +#if FOLLY_HAVE_WEAK_SYMBOLS +#define FOLLY_ATTR_WEAK __attribute__((__weak__)) +#else +#define FOLLY_ATTR_WEAK +#endif + +// Microsoft ABI version (can be overridden manually if necessary) +#ifndef FOLLY_MICROSOFT_ABI_VER +#ifdef _MSC_VER +#define FOLLY_MICROSOFT_ABI_VER _MSC_VER +#endif +#endif + +// FOLLY_ERASE +// +// A conceptual attribute/syntax combo for erasing a function from the build +// artifacts and forcing all call-sites to inline the callee, at least as far +// as each compiler supports. +// +// Semantically includes the inline specifier. +#define FOLLY_ERASE FOLLY_ALWAYS_INLINE FOLLY_ATTR_VISIBILITY_HIDDEN + +// FOLLY_ERASE_HACK_GCC +// +// Equivalent to FOLLY_ERASE, but without hiding under gcc. Useful when applied +// to a function which may sometimes be hidden separately, for example by being +// declared in an anonymous namespace, since in such cases with -Wattributes +// enabled, gcc would emit: 'visibility' attribute ignored. +// +// Semantically includes the inline specifier. 
+#if defined(__GNUC__) && !defined(__clang__) +#define FOLLY_ERASE_HACK_GCC FOLLY_ALWAYS_INLINE +#else +#define FOLLY_ERASE_HACK_GCC FOLLY_ERASE +#endif + +// FOLLY_ERASE_TRYCATCH +// +// Equivalent to FOLLY_ERASE, but for code which might contain explicit +// exception handling. Has the effect of FOLLY_ERASE, except under MSVC which +// warns about __forceinline when functions contain exception handling. +// +// Semantically includes the inline specifier. +#ifdef _MSC_VER +#define FOLLY_ERASE_TRYCATCH inline +#else +#define FOLLY_ERASE_TRYCATCH FOLLY_ERASE +#endif diff --git a/native/iosTest/Pods/Folly/folly/CachelinePadded.h b/native/iosTest/Pods/Folly/folly/CachelinePadded.h new file mode 100644 index 000000000..635bc53da --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/CachelinePadded.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace folly { + +/** + * Holds a type T, in addition to enough padding to ensure that it isn't subject + * to false sharing within the range used by folly. + * + * If `sizeof(T) <= alignof(T)` then the inner `T` will be entirely within one + * false sharing range (AKA cache line). + * + * CachelinePadded may add padding both before and after the value. Consider + * whether alignas(folly::hardware_destructive_interference_size) suffices. 
+ */ +template +class CachelinePadded { + public: + template + explicit CachelinePadded(Args&&... args) + : inner_(std::forward(args)...) { + FOLLY_SAFE_DCHECK( + (reinterpret_cast(&inner_) % alignof(T)) == 0, + "CachelinePadded requires types aligned to their ABI requirement"); + } + + T* get() { + return &inner_; + } + + const T* get() const { + return &inner_; + } + + T* operator->() { + return get(); + } + + const T* operator->() const { + return get(); + } + + T& operator*() { + return *get(); + } + + const T& operator*() const { + return *get(); + } + + private: + static constexpr size_t paddingSize() noexcept { + return hardware_destructive_interference_size - + (alignof(T) % hardware_destructive_interference_size); + } + char paddingPre_[paddingSize()]; + T inner_; + char paddingPost_[paddingSize()]; +}; +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/CancellationToken-inl.h b/native/iosTest/Pods/Folly/folly/CancellationToken-inl.h new file mode 100644 index 000000000..8ce5c5c79 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/CancellationToken-inl.h @@ -0,0 +1,351 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include + +namespace folly { + +namespace detail { + +// Internal cancellation state object. 
+class CancellationState { + public: + FOLLY_NODISCARD static CancellationStateSourcePtr create(); + + private: + // Constructed initially with a CancellationSource reference count of 1. + CancellationState() noexcept; + + ~CancellationState(); + + friend struct CancellationStateTokenDeleter; + friend struct CancellationStateSourceDeleter; + + void removeTokenReference() noexcept; + void removeSourceReference() noexcept; + + public: + FOLLY_NODISCARD CancellationStateTokenPtr addTokenReference() noexcept; + + FOLLY_NODISCARD CancellationStateSourcePtr addSourceReference() noexcept; + + bool tryAddCallback( + CancellationCallback* callback, + bool incrementRefCountIfSuccessful) noexcept; + + void removeCallback(CancellationCallback* callback) noexcept; + + bool isCancellationRequested() const noexcept; + bool canBeCancelled() const noexcept; + + // Request cancellation. + // Return 'true' if cancellation had already been requested. + // Return 'false' if this was the first thread to request + // cancellation. 
+ bool requestCancellation() noexcept; + + private: + void lock() noexcept; + void unlock() noexcept; + void unlockAndIncrementTokenCount() noexcept; + void unlockAndDecrementTokenCount() noexcept; + bool tryLockAndCancelUnlessCancelled() noexcept; + + template + bool tryLock(Predicate predicate) noexcept; + + static bool canBeCancelled(std::uint64_t state) noexcept; + static bool isCancellationRequested(std::uint64_t state) noexcept; + static bool isLocked(std::uint64_t state) noexcept; + + static constexpr std::uint64_t kCancellationRequestedFlag = 1; + static constexpr std::uint64_t kLockedFlag = 2; + static constexpr std::uint64_t kTokenReferenceCountIncrement = 4; + static constexpr std::uint64_t kSourceReferenceCountIncrement = + std::uint64_t(1) << 33u; + static constexpr std::uint64_t kTokenReferenceCountMask = + (kSourceReferenceCountIncrement - 1u) - + (kTokenReferenceCountIncrement - 1u); + static constexpr std::uint64_t kSourceReferenceCountMask = + std::numeric_limits::max() - + (kSourceReferenceCountIncrement - 1u); + + // Bit 0 - Cancellation Requested + // Bit 1 - Locked Flag + // Bits 2-32 - Token reference count (max ~2 billion) + // Bits 33-63 - Source reference count (max ~2 billion) + std::atomic state_; + CancellationCallback* head_; + std::thread::id signallingThreadId_; +}; + +inline void CancellationStateTokenDeleter::operator()( + CancellationState* state) noexcept { + state->removeTokenReference(); +} + +inline void CancellationStateSourceDeleter::operator()( + CancellationState* state) noexcept { + state->removeSourceReference(); +} + +} // namespace detail + +inline CancellationToken::CancellationToken( + const CancellationToken& other) noexcept + : state_() { + if (other.state_) { + state_ = other.state_->addTokenReference(); + } +} + +inline CancellationToken::CancellationToken(CancellationToken&& other) noexcept + : state_(std::move(other.state_)) {} + +inline CancellationToken& CancellationToken::operator=( + const 
CancellationToken& other) noexcept { + if (state_ != other.state_) { + CancellationToken temp{other}; + swap(temp); + } + return *this; +} + +inline CancellationToken& CancellationToken::operator=( + CancellationToken&& other) noexcept { + state_ = std::move(other.state_); + return *this; +} + +inline bool CancellationToken::isCancellationRequested() const noexcept { + return state_ != nullptr && state_->isCancellationRequested(); +} + +inline bool CancellationToken::canBeCancelled() const noexcept { + return state_ != nullptr && state_->canBeCancelled(); +} + +inline void CancellationToken::swap(CancellationToken& other) noexcept { + std::swap(state_, other.state_); +} + +inline CancellationToken::CancellationToken( + detail::CancellationStateTokenPtr state) noexcept + : state_(std::move(state)) {} + +inline bool operator==( + const CancellationToken& a, + const CancellationToken& b) noexcept { + return a.state_ == b.state_; +} + +inline bool operator!=( + const CancellationToken& a, + const CancellationToken& b) noexcept { + return !(a == b); +} + +inline CancellationSource::CancellationSource() + : state_(detail::CancellationState::create()) {} + +inline CancellationSource::CancellationSource( + const CancellationSource& other) noexcept + : state_() { + if (other.state_) { + state_ = other.state_->addSourceReference(); + } +} + +inline CancellationSource::CancellationSource( + CancellationSource&& other) noexcept + : state_(std::move(other.state_)) {} + +inline CancellationSource& CancellationSource::operator=( + const CancellationSource& other) noexcept { + if (state_ != other.state_) { + CancellationSource temp{other}; + swap(temp); + } + return *this; +} + +inline CancellationSource& CancellationSource::operator=( + CancellationSource&& other) noexcept { + state_ = std::move(other.state_); + return *this; +} + +inline CancellationSource CancellationSource::invalid() noexcept { + return CancellationSource{detail::CancellationStateSourcePtr{}}; +} + +inline 
bool CancellationSource::isCancellationRequested() const noexcept { + return state_ != nullptr && state_->isCancellationRequested(); +} + +inline bool CancellationSource::canBeCancelled() const noexcept { + return state_ != nullptr; +} + +inline CancellationToken CancellationSource::getToken() const noexcept { + if (state_ != nullptr) { + return CancellationToken{state_->addTokenReference()}; + } + return CancellationToken{}; +} + +inline bool CancellationSource::requestCancellation() const noexcept { + if (state_ != nullptr) { + return state_->requestCancellation(); + } + return false; +} + +inline void CancellationSource::swap(CancellationSource& other) noexcept { + std::swap(state_, other.state_); +} + +inline CancellationSource::CancellationSource( + detail::CancellationStateSourcePtr&& state) noexcept + : state_(std::move(state)) {} + +template < + typename Callable, + std::enable_if_t< + std::is_constructible:: + value, + int>> +inline CancellationCallback::CancellationCallback( + CancellationToken&& ct, + Callable&& callable) + : next_(nullptr), + prevNext_(nullptr), + state_(nullptr), + callback_(static_cast(callable)), + destructorHasRunInsideCallback_(nullptr), + callbackCompleted_(false) { + if (ct.state_ != nullptr && ct.state_->tryAddCallback(this, false)) { + state_ = ct.state_.release(); + } +} + +template < + typename Callable, + std::enable_if_t< + std::is_constructible:: + value, + int>> +inline CancellationCallback::CancellationCallback( + const CancellationToken& ct, + Callable&& callable) + : next_(nullptr), + prevNext_(nullptr), + state_(nullptr), + callback_(static_cast(callable)), + destructorHasRunInsideCallback_(nullptr), + callbackCompleted_(false) { + if (ct.state_ != nullptr && ct.state_->tryAddCallback(this, true)) { + state_ = ct.state_.get(); + } +} + +inline CancellationCallback::~CancellationCallback() { + if (state_ != nullptr) { + state_->removeCallback(this); + } +} + +inline void CancellationCallback::invokeCallback() noexcept 
{ + // Invoke within a noexcept context so that we std::terminate() if it throws. + callback_(); +} + +namespace detail { + +inline CancellationStateSourcePtr CancellationState::create() { + return CancellationStateSourcePtr{new CancellationState()}; +} + +inline CancellationState::CancellationState() noexcept + : state_(kSourceReferenceCountIncrement), + head_(nullptr), + signallingThreadId_() {} + +inline CancellationStateTokenPtr +CancellationState::addTokenReference() noexcept { + state_.fetch_add(kTokenReferenceCountIncrement, std::memory_order_relaxed); + return CancellationStateTokenPtr{this}; +} + +inline void CancellationState::removeTokenReference() noexcept { + const auto oldState = state_.fetch_sub( + kTokenReferenceCountIncrement, std::memory_order_acq_rel); + DCHECK( + (oldState & kTokenReferenceCountMask) >= kTokenReferenceCountIncrement); + if (oldState < (2 * kTokenReferenceCountIncrement)) { + delete this; + } +} + +inline CancellationStateSourcePtr +CancellationState::addSourceReference() noexcept { + state_.fetch_add(kSourceReferenceCountIncrement, std::memory_order_relaxed); + return CancellationStateSourcePtr{this}; +} + +inline void CancellationState::removeSourceReference() noexcept { + const auto oldState = state_.fetch_sub( + kSourceReferenceCountIncrement, std::memory_order_acq_rel); + DCHECK( + (oldState & kSourceReferenceCountMask) >= kSourceReferenceCountIncrement); + if (oldState < + (kSourceReferenceCountIncrement + kTokenReferenceCountIncrement)) { + delete this; + } +} + +inline bool CancellationState::isCancellationRequested() const noexcept { + return isCancellationRequested(state_.load(std::memory_order_acquire)); +} + +inline bool CancellationState::canBeCancelled() const noexcept { + return canBeCancelled(state_.load(std::memory_order_acquire)); +} + +inline bool CancellationState::canBeCancelled(std::uint64_t state) noexcept { + // Can be cancelled if there is at least one CancellationSource ref-count + // or if cancellation 
has been requested. + return (state >= kSourceReferenceCountIncrement) || + isCancellationRequested(state); +} + +inline bool CancellationState::isCancellationRequested( + std::uint64_t state) noexcept { + return (state & kCancellationRequestedFlag) != 0; +} + +inline bool CancellationState::isLocked(std::uint64_t state) noexcept { + return (state & kLockedFlag) != 0; +} + +} // namespace detail + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/CancellationToken.h b/native/iosTest/Pods/Folly/folly/CancellationToken.h new file mode 100644 index 000000000..bdda121b0 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/CancellationToken.h @@ -0,0 +1,298 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include +#include + +#include +#include +#include +#include + +namespace folly { + +class CancellationCallback; +class CancellationSource; +struct OperationCancelled : public std::exception { + const char* what() const noexcept override { + return "coroutine operation cancelled"; + } +}; + +namespace detail { +class CancellationState; +struct CancellationStateTokenDeleter { + void operator()(CancellationState*) noexcept; +}; +struct CancellationStateSourceDeleter { + void operator()(CancellationState*) noexcept; +}; +using CancellationStateTokenPtr = + std::unique_ptr; +using CancellationStateSourcePtr = + std::unique_ptr; +} // namespace detail + +// A CancellationToken is an object that can be passed into an function or +// operation that allows the caller to later request that the operation be +// cancelled. +// +// A CancellationToken object can be obtained by calling the .getToken() +// method on a CancellationSource or by copying another CancellationToken +// object. All CancellationToken objects obtained from the same original +// CancellationSource object all reference the same underlying cancellation +// state and will all be cancelled together. +// +// If your function needs to be cancellable but does not need to request +// cancellation then you should take a CancellationToken as a parameter. +// If your function needs to be able to request cancellation then you +// should instead take a CancellationSource as a parameter. +class CancellationToken { + public: + // Constructs to a token that can never be cancelled. + // + // Pass a default-constructed CancellationToken into an operation that + // you never intend to cancel. These objects are very cheap to create. + CancellationToken() noexcept = default; + + // Construct a copy of the token that shares the same underlying state. 
+ CancellationToken(const CancellationToken& other) noexcept; + CancellationToken(CancellationToken&& other) noexcept; + + CancellationToken& operator=(const CancellationToken& other) noexcept; + CancellationToken& operator=(CancellationToken&& other) noexcept; + + // Query whether someone has called .requestCancellation() on an instance + // of CancellationSource object associated with this CancellationToken. + bool isCancellationRequested() const noexcept; + + // Query whether this CancellationToken can ever have cancellation requested + // on it. + // + // This will return false if the CancellationToken is not associated with a + // CancellationSource object. eg. because the CancellationToken was + // default-constructed, has been moved-from or because the last + // CancellationSource object associated with the underlying cancellation state + // has been destroyed and the operation has not yet been cancelled and so + // never will be. + // + // Implementations of operations may be able to take more efficient code-paths + // if they know they can never be cancelled. + bool canBeCancelled() const noexcept; + + void swap(CancellationToken& other) noexcept; + + friend bool operator==( + const CancellationToken& a, + const CancellationToken& b) noexcept; + + private: + friend class CancellationCallback; + friend class CancellationSource; + + explicit CancellationToken(detail::CancellationStateTokenPtr state) noexcept; + + detail::CancellationStateTokenPtr state_; +}; + +bool operator==( + const CancellationToken& a, + const CancellationToken& b) noexcept; +bool operator!=( + const CancellationToken& a, + const CancellationToken& b) noexcept; + +// A CancellationSource object provides the ability to request cancellation of +// operations that an associated CancellationToken was passed to. +// +// Example usage: +// CancellationSource cs; +// Future f = startSomeOperation(cs.getToken()); +// +// // Later... 
+// cs.requestCancellation(); +class CancellationSource { + public: + // Construct to a new, independent cancellation source. + CancellationSource(); + + // Construct a new reference to the same underlying cancellation state. + // + // Either the original or the new copy can be used to request cancellation + // of associated work. + CancellationSource(const CancellationSource& other) noexcept; + + // This leaves 'other' in an empty state where 'requestCancellation()' is a + // no-op and 'canBeCancelled()' returns false. + CancellationSource(CancellationSource&& other) noexcept; + + CancellationSource& operator=(const CancellationSource& other) noexcept; + CancellationSource& operator=(CancellationSource&& other) noexcept; + + // Construct a CancellationSource that cannot be cancelled. + // + // This factory function can be used to obtain a CancellationSource that + // is equivalent to a moved-from CancellationSource object without needing + // to allocate any shared-state. + static CancellationSource invalid() noexcept; + + // Query if cancellation has already been requested on this CancellationSource + // or any other CancellationSource object copied from the same original + // CancellationSource object. + bool isCancellationRequested() const noexcept; + + // Query if cancellation can be requested through this CancellationSource + // object. This will only return false if the CancellationSource object has + // been moved-from. + bool canBeCancelled() const noexcept; + + // Obtain a CancellationToken linked to this CancellationSource. + // + // This token can be passed into cancellable operations to allow the caller + // to later request cancellation of that operation. + CancellationToken getToken() const noexcept; + + // Request cancellation of work associated with this CancellationSource. 
+ // + // This will ensure subsequent calls to isCancellationRequested() on any + // CancellationSource or CancellationToken object associated with the same + // underlying cancellation state to return true. + // + // If this is the first call to requestCancellation() on any + // CancellationSource object with the same underlying state then this call + // will also execute the callbacks associated with any CancellationCallback + // objects that were constructed with an associated CancellationToken. + // + // Note that it is possible that another thread may be concurrently + // registering a callback with CancellationCallback. This method guarantees + // that either this thread will see the callback registration and will + // ensure that the callback is called, or the CancellationCallback constructor + // will see the cancellation-requested signal and will execute the callback + // inline inside the constructor. + // + // Returns the previous state of 'isCancellationRequested()'. i.e. + // - 'true' if cancellation had previously been requested. + // - 'false' if this was the first call to request cancellation. 
+ bool requestCancellation() const noexcept; + + void swap(CancellationSource& other) noexcept; + + friend bool operator==( + const CancellationSource& a, + const CancellationSource& b) noexcept; + + private: + explicit CancellationSource( + detail::CancellationStateSourcePtr&& state) noexcept; + + detail::CancellationStateSourcePtr state_; +}; + +bool operator==( + const CancellationSource& a, + const CancellationSource& b) noexcept; +bool operator!=( + const CancellationSource& a, + const CancellationSource& b) noexcept; + +class CancellationCallback { + using VoidFunction = folly::Function; + + public: + // Constructing a CancellationCallback object registers the callback + // with the specified CancellationToken such that the callback will be + // executed if the corresponding CancellationSource object has the + // requestCancellation() method called on it. + // + // If the CancellationToken object already had cancellation requested + // then the callback will be executed inline on the current thread before + // the constructor returns. Otherwise, the callback will be executed on + // in the execution context of the first thread to call requestCancellation() + // on a corresponding CancellationSource. + // + // The callback object must not throw any unhandled exceptions. Doing so + // will result in the program terminating via std::terminate(). + template < + typename Callable, + std::enable_if_t< + std::is_constructible::value, + int> = 0> + CancellationCallback(CancellationToken&& ct, Callable&& callable); + template < + typename Callable, + std::enable_if_t< + std::is_constructible::value, + int> = 0> + CancellationCallback(const CancellationToken& ct, Callable&& callable); + + // Deregisters the callback from the CancellationToken. 
+ // + // If cancellation has been requested concurrently on another thread and the + // callback is currently executing then the destructor will block until after + // the callback has returned (otherwise it might be left with a dangling + // reference). + // + // You should generally try to implement your callback functions to be lock + // free to avoid deadlocks between the callback executing and the + // CancellationCallback destructor trying to deregister the callback. + // + // If the callback has not started executing yet then the callback will be + // deregistered from the CancellationToken before the destructor completes. + // + // Once the destructor returns you can be guaranteed that the callback will + // not be called by a subsequent call to 'requestCancellation()' on a + // CancellationSource associated with the CancellationToken passed to the + // constructor. + ~CancellationCallback(); + + // Not copyable/movable + CancellationCallback(const CancellationCallback&) = delete; + CancellationCallback(CancellationCallback&&) = delete; + CancellationCallback& operator=(const CancellationCallback&) = delete; + CancellationCallback& operator=(CancellationCallback&&) = delete; + + private: + friend class detail::CancellationState; + + void invokeCallback() noexcept; + + CancellationCallback* next_; + + // Pointer to the pointer that points to this node in the linked list. + // This could be the 'next_' of a previous CancellationCallback or could + // be the 'head_' pointer of the CancellationState. + // If this node is inserted in the list then this will be non-null. + CancellationCallback** prevNext_; + + detail::CancellationState* state_; + VoidFunction callback_; + + // Pointer to a flag stored on the stack of the caller to invokeCallback() + // that is used to indicate to the caller of invokeCallback() that the + // destructor has run and it is no longer valid to access the callback + // object. 
+ bool* destructorHasRunInsideCallback_; + + // Flag used to signal that the callback has completed executing on another + // thread and it is now safe to exit the destructor. + std::atomic callbackCompleted_; +}; + +} // namespace folly + +#include diff --git a/native/iosTest/Pods/Folly/folly/Chrono.h b/native/iosTest/Pods/Folly/folly/Chrono.h new file mode 100644 index 000000000..3b3838b43 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Chrono.h @@ -0,0 +1,204 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +/*** + * include or backport: + * * std::chrono::ceil + * * std::chrono::floor + * * std::chrono::round + */ + +#if __cpp_lib_chrono >= 201510 || _LIBCPP_STD_VER > 14 || _MSC_VER + +namespace folly { +namespace chrono { + +/* using override */ using std::chrono::abs; +/* using override */ using std::chrono::ceil; +/* using override */ using std::chrono::floor; +/* using override */ using std::chrono::round; +} // namespace chrono +} // namespace folly + +#else + +namespace folly { +namespace chrono { + +namespace detail { + +// from: http://en.cppreference.com/w/cpp/chrono/duration/ceil, CC-BY-SA +template +struct is_duration : std::false_type {}; +template +struct is_duration> : std::true_type {}; + +template +constexpr To ceil_impl(Duration const& d, To const& t) { + return t < d ? 
t + To{1} : t; +} + +template +constexpr To floor_impl(Duration const& d, To const& t) { + return t > d ? t - To{1} : t; +} + +template +constexpr To round_impl(To const& t0, To const& t1, Diff diff0, Diff diff1) { + return diff0 < diff1 ? t0 : diff1 < diff0 ? t1 : t0.count() & 1 ? t1 : t0; +} + +template +constexpr To round_impl(Duration const& d, To const& t0, To const& t1) { + return round_impl(t0, t1, d - t0, t1 - d); +} + +template +constexpr To round_impl(Duration const& d, To const& t0) { + return round_impl(d, t0, t0 + To{1}); +} +} // namespace detail + +// mimic: std::chrono::abs, C++17 +template < + typename Rep, + typename Period, + typename = typename std::enable_if< + std::chrono::duration::min() < + std::chrono::duration::zero()>::type> +constexpr std::chrono::duration abs( + std::chrono::duration const& d) { + return d < std::chrono::duration::zero() ? -d : d; +} + +// mimic: std::chrono::ceil, C++17 +// from: http://en.cppreference.com/w/cpp/chrono/duration/ceil, CC-BY-SA +template < + typename To, + typename Rep, + typename Period, + typename = typename std::enable_if::value>::type> +constexpr To ceil(std::chrono::duration const& d) { + return detail::ceil_impl(d, std::chrono::duration_cast(d)); +} + +// mimic: std::chrono::ceil, C++17 +// from: http://en.cppreference.com/w/cpp/chrono/time_point/ceil, CC-BY-SA +template < + typename To, + typename Clock, + typename Duration, + typename = typename std::enable_if::value>::type> +constexpr std::chrono::time_point ceil( + std::chrono::time_point const& tp) { + return std::chrono::time_point{ceil(tp.time_since_epoch())}; +} + +// mimic: std::chrono::floor, C++17 +// from: http://en.cppreference.com/w/cpp/chrono/duration/floor, CC-BY-SA +template < + typename To, + typename Rep, + typename Period, + typename = typename std::enable_if::value>::type> +constexpr To floor(std::chrono::duration const& d) { + return detail::floor_impl(d, std::chrono::duration_cast(d)); +} + +// mimic: std::chrono::floor, 
C++17 +// from: http://en.cppreference.com/w/cpp/chrono/time_point/floor, CC-BY-SA +template < + typename To, + typename Clock, + typename Duration, + typename = typename std::enable_if::value>::type> +constexpr std::chrono::time_point floor( + std::chrono::time_point const& tp) { + return std::chrono::time_point{floor(tp.time_since_epoch())}; +} + +// mimic: std::chrono::round, C++17 +// from: http://en.cppreference.com/w/cpp/chrono/duration/round, CC-BY-SA +template < + typename To, + typename Rep, + typename Period, + typename = typename std::enable_if< + detail::is_duration::value && + !std::chrono::treat_as_floating_point::value>::type> +constexpr To round(std::chrono::duration const& d) { + return detail::round_impl(d, floor(d)); +} + +// mimic: std::chrono::round, C++17 +// from: http://en.cppreference.com/w/cpp/chrono/time_point/round, CC-BY-SA +template < + typename To, + typename Clock, + typename Duration, + typename = typename std::enable_if< + detail::is_duration::value && + !std::chrono::treat_as_floating_point::value>::type> +constexpr std::chrono::time_point round( + std::chrono::time_point const& tp) { + return std::chrono::time_point{round(tp.time_since_epoch())}; +} +} // namespace chrono +} // namespace folly + +#endif + +namespace folly { +namespace chrono { + +struct coarse_steady_clock { + using rep = std::chrono::milliseconds::rep; + using period = std::chrono::milliseconds::period; + using duration = std::chrono::duration; + using time_point = std::chrono::time_point; + constexpr static bool is_steady = true; + + static time_point now() noexcept { +#ifndef CLOCK_MONOTONIC_COARSE + return time_point(std::chrono::duration_cast( + std::chrono::steady_clock::now().time_since_epoch())); +#else + timespec ts; + auto ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + if (kIsDebug && (ret != 0)) { + throw_exception( + "Error using CLOCK_MONOTONIC_COARSE."); + } + + return time_point(std::chrono::duration_cast( + std::chrono::seconds(ts.tv_sec) + + 
std::chrono::nanoseconds(ts.tv_nsec))); +#endif + } +}; + +} // namespace chrono +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ClockGettimeWrappers.h b/native/iosTest/Pods/Folly/folly/ClockGettimeWrappers.h new file mode 100644 index 000000000..8c403193e --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ClockGettimeWrappers.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +namespace folly { +namespace chrono { + +extern int (*clock_gettime)(clockid_t, timespec* ts); +extern int64_t (*clock_gettime_ns)(clockid_t); +} // namespace chrono +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ConcurrentBitSet.h b/native/iosTest/Pods/Folly/folly/ConcurrentBitSet.h new file mode 100644 index 000000000..2be6e2162 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ConcurrentBitSet.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace folly { + +/** + * An atomic bitset of fixed size (specified at compile time). + * + * Formerly known as AtomicBitSet. It was renamed while fixing a bug + * to avoid any silent breakages during run time. + */ +template +class ConcurrentBitSet { + public: + /** + * Construct a ConcurrentBitSet; all bits are initially false. + */ + ConcurrentBitSet(); + + ConcurrentBitSet(const ConcurrentBitSet&) = delete; + ConcurrentBitSet& operator=(const ConcurrentBitSet&) = delete; + + /** + * Set bit idx to true, using the given memory order. Returns the + * previous value of the bit. + * + * Note that the operation is a read-modify-write operation due to the use + * of fetch_or. + */ + bool set(size_t idx, std::memory_order order = std::memory_order_seq_cst); + + /** + * Set bit idx to false, using the given memory order. Returns the + * previous value of the bit. + * + * Note that the operation is a read-modify-write operation due to the use + * of fetch_and. + */ + bool reset(size_t idx, std::memory_order order = std::memory_order_seq_cst); + + /** + * Set bit idx to the given value, using the given memory order. Returns + * the previous value of the bit. + * + * Note that the operation is a read-modify-write operation due to the use + * of fetch_and or fetch_or. + * + * Yes, this is an overload of set(), to keep as close to std::bitset's + * interface as possible. + */ + bool set( + size_t idx, + bool value, + std::memory_order order = std::memory_order_seq_cst); + + /** + * Read bit idx. + */ + bool test(size_t idx, std::memory_order order = std::memory_order_seq_cst) + const; + + /** + * Same as test() with the default memory order. + */ + bool operator[](size_t idx) const; + + /** + * Return the size of the bitset. 
+ */ + constexpr size_t size() const { + return N; + } + + private: + // Pick the largest lock-free type available +#if (ATOMIC_LLONG_LOCK_FREE == 2) + typedef unsigned long long BlockType; +#elif (ATOMIC_LONG_LOCK_FREE == 2) + typedef unsigned long BlockType; +#else + // Even if not lock free, what can we do? + typedef unsigned int BlockType; +#endif + typedef std::atomic AtomicBlockType; + + static constexpr size_t kBitsPerBlock = + std::numeric_limits::digits; + + static constexpr size_t blockIndex(size_t bit) { + return bit / kBitsPerBlock; + } + + static constexpr size_t bitOffset(size_t bit) { + return bit % kBitsPerBlock; + } + + // avoid casts + static constexpr BlockType kOne = 1; + static constexpr size_t kNumBlocks = (N + kBitsPerBlock - 1) / kBitsPerBlock; + std::array data_; +}; + +// value-initialize to zero +template +inline ConcurrentBitSet::ConcurrentBitSet() : data_() {} + +template +inline bool ConcurrentBitSet::set(size_t idx, std::memory_order order) { + assert(idx < N); + BlockType mask = kOne << bitOffset(idx); + return data_[blockIndex(idx)].fetch_or(mask, order) & mask; +} + +template +inline bool ConcurrentBitSet::reset(size_t idx, std::memory_order order) { + assert(idx < N); + BlockType mask = kOne << bitOffset(idx); + return data_[blockIndex(idx)].fetch_and(~mask, order) & mask; +} + +template +inline bool +ConcurrentBitSet::set(size_t idx, bool value, std::memory_order order) { + return value ? 
set(idx, order) : reset(idx, order); +} + +template +inline bool ConcurrentBitSet::test(size_t idx, std::memory_order order) + const { + assert(idx < N); + BlockType mask = kOne << bitOffset(idx); + return data_[blockIndex(idx)].load(order) & mask; +} + +template +inline bool ConcurrentBitSet::operator[](size_t idx) const { + return test(idx); +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ConcurrentSkipList-inl.h b/native/iosTest/Pods/Folly/folly/ConcurrentSkipList-inl.h new file mode 100644 index 000000000..a0eec3076 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ConcurrentSkipList-inl.h @@ -0,0 +1,377 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// @author: Xin Liu + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +namespace folly { +namespace detail { + +template +class csl_iterator; + +template +class SkipListNode { + enum : uint16_t { + IS_HEAD_NODE = 1, + MARKED_FOR_REMOVAL = (1 << 1), + FULLY_LINKED = (1 << 2), + }; + + public: + typedef T value_type; + + SkipListNode(const SkipListNode&) = delete; + SkipListNode& operator=(const SkipListNode&) = delete; + + template < + typename NodeAlloc, + typename U, + typename = + typename std::enable_if::value>::type> + static SkipListNode* + create(NodeAlloc& alloc, int height, U&& data, bool isHead = false) { + DCHECK(height >= 1 && height < 64) << height; + + size_t size = + sizeof(SkipListNode) + height * sizeof(std::atomic); + auto storage = std::allocator_traits::allocate(alloc, size); + // do placement new + return new (storage) + SkipListNode(uint8_t(height), std::forward(data), isHead); + } + + template + static void destroy(NodeAlloc& alloc, SkipListNode* node) { + size_t size = sizeof(SkipListNode) + + node->height_ * sizeof(std::atomic); + node->~SkipListNode(); + std::allocator_traits::deallocate(alloc, node, size); + } + + template + struct DestroyIsNoOp : StrictConjunction< + AllocatorHasTrivialDeallocate, + std::is_trivially_destructible> {}; + + // copy the head node to a new head node assuming lock acquired + SkipListNode* copyHead(SkipListNode* node) { + DCHECK(node != nullptr && height_ > node->height_); + setFlags(node->getFlags()); + for (uint8_t i = 0; i < node->height_; ++i) { + setSkip(i, node->skip(i)); + } + return this; + } + + inline SkipListNode* skip(int layer) const { + DCHECK_LT(layer, height_); + return skip_[layer].load(std::memory_order_consume); + } + + // next valid node as in the linked list + SkipListNode* next() { + SkipListNode* node; + for (node = skip(0); (node != nullptr && node->markedForRemoval()); + node 
= node->skip(0)) { + } + return node; + } + + void setSkip(uint8_t h, SkipListNode* next) { + DCHECK_LT(h, height_); + skip_[h].store(next, std::memory_order_release); + } + + value_type& data() { + return data_; + } + const value_type& data() const { + return data_; + } + int maxLayer() const { + return height_ - 1; + } + int height() const { + return height_; + } + + std::unique_lock acquireGuard() { + return std::unique_lock(spinLock_); + } + + bool fullyLinked() const { + return getFlags() & FULLY_LINKED; + } + bool markedForRemoval() const { + return getFlags() & MARKED_FOR_REMOVAL; + } + bool isHeadNode() const { + return getFlags() & IS_HEAD_NODE; + } + + void setIsHeadNode() { + setFlags(uint16_t(getFlags() | IS_HEAD_NODE)); + } + void setFullyLinked() { + setFlags(uint16_t(getFlags() | FULLY_LINKED)); + } + void setMarkedForRemoval() { + setFlags(uint16_t(getFlags() | MARKED_FOR_REMOVAL)); + } + + private: + // Note! this can only be called from create() as a placement new. + template + SkipListNode(uint8_t height, U&& data, bool isHead) + : height_(height), data_(std::forward(data)) { + spinLock_.init(); + setFlags(0); + if (isHead) { + setIsHeadNode(); + } + // need to explicitly init the dynamic atomic pointer array + for (uint8_t i = 0; i < height_; ++i) { + new (&skip_[i]) std::atomic(nullptr); + } + } + + ~SkipListNode() { + for (uint8_t i = 0; i < height_; ++i) { + skip_[i].~atomic(); + } + } + + uint16_t getFlags() const { + return flags_.load(std::memory_order_consume); + } + void setFlags(uint16_t flags) { + flags_.store(flags, std::memory_order_release); + } + + // TODO(xliu): on x86_64, it's possible to squeeze these into + // skip_[0] to maybe save 8 bytes depending on the data alignments. + // NOTE: currently this is x86_64 only anyway, due to the + // MicroSpinLock. 
+ std::atomic flags_; + const uint8_t height_; + MicroSpinLock spinLock_; + + value_type data_; + + std::atomic skip_[0]; +}; + +class SkipListRandomHeight { + enum { kMaxHeight = 64 }; + + public: + // make it a singleton. + static SkipListRandomHeight* instance() { + static SkipListRandomHeight instance_; + return &instance_; + } + + int getHeight(int maxHeight) const { + DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!"; + double p = randomProb(); + for (int i = 0; i < maxHeight; ++i) { + if (p < lookupTable_[i]) { + return i + 1; + } + } + return maxHeight; + } + + size_t getSizeLimit(int height) const { + DCHECK_LT(height, kMaxHeight); + return sizeLimitTable_[height]; + } + + private: + SkipListRandomHeight() { + initLookupTable(); + } + + void initLookupTable() { + // set skip prob = 1/E + static const double kProbInv = exp(1); + static const double kProb = 1.0 / kProbInv; + static const size_t kMaxSizeLimit = std::numeric_limits::max(); + + double sizeLimit = 1; + double p = lookupTable_[0] = (1 - kProb); + sizeLimitTable_[0] = 1; + for (int i = 1; i < kMaxHeight - 1; ++i) { + p *= kProb; + sizeLimit *= kProbInv; + lookupTable_[i] = lookupTable_[i - 1] + p; + sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit + ? 
kMaxSizeLimit + : static_cast(sizeLimit); + } + lookupTable_[kMaxHeight - 1] = 1; + sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit; + } + + static double randomProb() { + static ThreadLocal rng_; + return (*rng_)(); + } + + double lookupTable_[kMaxHeight]; + size_t sizeLimitTable_[kMaxHeight]; +}; + +template +class NodeRecycler; + +template +class NodeRecycler< + NodeType, + NodeAlloc, + typename std::enable_if< + !NodeType::template DestroyIsNoOp::value>::type> { + public: + explicit NodeRecycler(const NodeAlloc& alloc) + : refs_(0), dirty_(false), alloc_(alloc) { + lock_.init(); + } + + explicit NodeRecycler() : refs_(0), dirty_(false) { + lock_.init(); + } + + ~NodeRecycler() { + CHECK_EQ(refs(), 0); + if (nodes_) { + for (auto& node : *nodes_) { + NodeType::destroy(alloc_, node); + } + } + } + + void add(NodeType* node) { + std::lock_guard g(lock_); + if (nodes_.get() == nullptr) { + nodes_ = std::make_unique>(1, node); + } else { + nodes_->push_back(node); + } + DCHECK_GT(refs(), 0); + dirty_.store(true, std::memory_order_relaxed); + } + + int addRef() { + return refs_.fetch_add(1, std::memory_order_relaxed); + } + + int releaseRef() { + // We don't expect to clean the recycler immediately everytime it is OK + // to do so. Here, it is possible that multiple accessors all release at + // the same time but nobody would clean the recycler here. If this + // happens, the recycler will usually still get cleaned when + // such a race doesn't happen. The worst case is the recycler will + // eventually get deleted along with the skiplist. 
+ if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) { + return refs_.fetch_add(-1, std::memory_order_relaxed); + } + + std::unique_ptr> newNodes; + { + std::lock_guard g(lock_); + if (nodes_.get() == nullptr || refs() > 1) { + return refs_.fetch_add(-1, std::memory_order_relaxed); + } + // once refs_ reaches 1 and there is no other accessor, it is safe to + // remove all the current nodes in the recycler, as we already acquired + // the lock here so no more new nodes can be added, even though new + // accessors may be added after that. + newNodes.swap(nodes_); + dirty_.store(false, std::memory_order_relaxed); + } + + // TODO(xliu) should we spawn a thread to do this when there are large + // number of nodes in the recycler? + for (auto& node : *newNodes) { + NodeType::destroy(alloc_, node); + } + + // decrease the ref count at the very end, to minimize the + // chance of other threads acquiring lock_ to clear the deleted + // nodes again. + return refs_.fetch_add(-1, std::memory_order_relaxed); + } + + NodeAlloc& alloc() { + return alloc_; + } + + private: + int refs() const { + return refs_.load(std::memory_order_relaxed); + } + + std::unique_ptr> nodes_; + std::atomic refs_; // current number of visitors to the list + std::atomic dirty_; // whether *nodes_ is non-empty + MicroSpinLock lock_; // protects access to *nodes_ + NodeAlloc alloc_; +}; + +// In case of arena allocator, no recycling is necessary, and it's possible +// to save on ConcurrentSkipList size. 
+template +class NodeRecycler< + NodeType, + NodeAlloc, + typename std::enable_if< + NodeType::template DestroyIsNoOp::value>::type> { + public: + explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) {} + + void addRef() {} + void releaseRef() {} + + void add(NodeType* /* node */) {} + + NodeAlloc& alloc() { + return alloc_; + } + + private: + NodeAlloc alloc_; +}; + +} // namespace detail +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ConcurrentSkipList.h b/native/iosTest/Pods/Folly/folly/ConcurrentSkipList.h new file mode 100644 index 000000000..ab75ce397 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ConcurrentSkipList.h @@ -0,0 +1,878 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @author: Xin Liu +// +// A concurrent skip list (CSL) implementation. +// Ref: http://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/OPODIS2006-BA.pdf + +/* + +This implements a sorted associative container that supports only +unique keys. (Similar to std::set.) + +Features: + + 1. Small memory overhead: ~40% less memory overhead compared with + std::set (1.6 words per node versus 3). It has an minimum of 4 + words (7 words if there nodes got deleted) per-list overhead + though. + + 2. Read accesses (count, find iterator, skipper) are lock-free and + mostly wait-free (the only wait a reader may need to do is when + the node it is visiting is in a pending stage, i.e. 
deleting, + adding and not fully linked). Write accesses (remove, add) need + to acquire locks, but locks are local to the predecessor nodes + and/or successor nodes. + + 3. Good high contention performance, comparable single-thread + performance. In the multithreaded case (12 workers), CSL tested + 10x faster than a RWSpinLocked std::set for an averaged sized + list (1K - 1M nodes). + + Comparable read performance to std::set when single threaded, + especially when the list size is large, and scales better to + larger lists: when the size is small, CSL can be 20-50% slower on + find()/contains(). As the size gets large (> 1M elements), + find()/contains() can be 30% faster. + + Iterating through a skiplist is similar to iterating through a + linked list, thus is much (2-6x) faster than on a std::set + (tree-based). This is especially true for short lists due to + better cache locality. Based on that, it's also faster to + intersect two skiplists. + + 4. Lazy removal with GC support. The removed nodes get deleted when + the last Accessor to the skiplist is destroyed. + +Caveats: + + 1. Write operations are usually 30% slower than std::set in a single + threaded environment. + + 2. Need to have a head node for each list, which has a 4 word + overhead. + + 3. When the list is quite small (< 1000 elements), single threaded + benchmarks show CSL can be 10x slower than std:set. + + 4. The interface requires using an Accessor to access the skiplist. + (See below.) + + 5. Currently x64 only, due to use of MicroSpinLock. + + 6. Freed nodes will not be reclaimed as long as there are ongoing + uses of the list. + +Sample usage: + + typedef ConcurrentSkipList SkipListT; + shared_ptr sl(SkipListT::createInstance(init_head_height); + { + // It's usually good practice to hold an accessor only during + // its necessary life cycle (but not in a tight loop as + // Accessor creation incurs ref-counting overhead). 
+ // + // Holding it longer delays garbage-collecting the deleted + // nodes in the list. + SkipListT::Accessor accessor(sl); + accessor.insert(23); + accessor.erase(2); + for (auto &elem : accessor) { + // use elem to access data + } + ... ... + } + + Another useful type is the Skipper accessor. This is useful if you + want to skip to locations in the way std::lower_bound() works, + i.e. it can be used for going through the list by skipping to the + node no less than a specified key. The Skipper keeps its location as + state, which makes it convenient for things like implementing + intersection of two sets efficiently, as it can start from the last + visited position. + + { + SkipListT::Accessor accessor(sl); + SkipListT::Skipper skipper(accessor); + skipper.to(30); + if (skipper) { + CHECK_LE(30, *skipper); + } + ... ... + // GC may happen when the accessor gets destructed. + } +*/ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace folly { + +template < + typename T, + typename Comp = std::less, + // All nodes are allocated using provided SysAllocator, + // it should be thread-safe. + typename NodeAlloc = SysAllocator, + int MAX_HEIGHT = 24> +class ConcurrentSkipList { + // MAX_HEIGHT needs to be at least 2 to suppress compiler + // warnings/errors (Werror=uninitialized tiggered due to preds_[1] + // being treated as a scalar in the compiler). 
+ static_assert( + MAX_HEIGHT >= 2 && MAX_HEIGHT < 64, + "MAX_HEIGHT can only be in the range of [2, 64)"); + typedef std::unique_lock ScopedLocker; + typedef ConcurrentSkipList SkipListType; + + public: + typedef detail::SkipListNode NodeType; + typedef T value_type; + typedef T key_type; + + typedef detail::csl_iterator iterator; + typedef detail::csl_iterator const_iterator; + + class Accessor; + class Skipper; + + explicit ConcurrentSkipList(int height, const NodeAlloc& alloc) + : recycler_(alloc), + head_(NodeType::create(recycler_.alloc(), height, value_type(), true)), + size_(0) {} + + explicit ConcurrentSkipList(int height) + : recycler_(), + head_(NodeType::create(recycler_.alloc(), height, value_type(), true)), + size_(0) {} + + // Convenient function to get an Accessor to a new instance. + static Accessor create(int height, const NodeAlloc& alloc) { + return Accessor(createInstance(height, alloc)); + } + + static Accessor create(int height = 1) { + return Accessor(createInstance(height)); + } + + // Create a shared_ptr skiplist object with initial head height. + static std::shared_ptr createInstance( + int height, + const NodeAlloc& alloc) { + return std::make_shared(height, alloc); + } + + static std::shared_ptr createInstance(int height = 1) { + return std::make_shared(height); + } + + //=================================================================== + // Below are implementation details. + // Please see ConcurrentSkipList::Accessor for stdlib-like APIs. + //=================================================================== + + ~ConcurrentSkipList() { + if /* constexpr */ (NodeType::template DestroyIsNoOp::value) { + // Avoid traversing the list if using arena allocator. 
+ return; + } + for (NodeType* current = head_.load(std::memory_order_relaxed); current;) { + NodeType* tmp = current->skip(0); + NodeType::destroy(recycler_.alloc(), current); + current = tmp; + } + } + + private: + static bool greater(const value_type& data, const NodeType* node) { + return node && Comp()(node->data(), data); + } + + static bool less(const value_type& data, const NodeType* node) { + return (node == nullptr) || Comp()(data, node->data()); + } + + static int findInsertionPoint( + NodeType* cur, + int cur_layer, + const value_type& data, + NodeType* preds[], + NodeType* succs[]) { + int foundLayer = -1; + NodeType* pred = cur; + NodeType* foundNode = nullptr; + for (int layer = cur_layer; layer >= 0; --layer) { + NodeType* node = pred->skip(layer); + while (greater(data, node)) { + pred = node; + node = node->skip(layer); + } + if (foundLayer == -1 && !less(data, node)) { // the two keys equal + foundLayer = layer; + foundNode = node; + } + preds[layer] = pred; + + // if found, succs[0..foundLayer] need to point to the cached foundNode, + // as foundNode might be deleted at the same time thus pred->skip() can + // return nullptr or another node. + succs[layer] = foundNode ? foundNode : node; + } + return foundLayer; + } + + size_t size() const { + return size_.load(std::memory_order_relaxed); + } + + int height() const { + return head_.load(std::memory_order_consume)->height(); + } + + int maxLayer() const { + return height() - 1; + } + + size_t incrementSize(int delta) { + return size_.fetch_add(delta, std::memory_order_relaxed) + delta; + } + + // Returns the node if found, nullptr otherwise. + NodeType* find(const value_type& data) { + auto ret = findNode(data); + if (ret.second && !ret.first->markedForRemoval()) { + return ret.first; + } + return nullptr; + } + + // lock all the necessary nodes for changing (adding or removing) the list. 
+ // returns true if all the lock acquried successfully and the related nodes + // are all validate (not in certain pending states), false otherwise. + bool lockNodesForChange( + int nodeHeight, + ScopedLocker guards[MAX_HEIGHT], + NodeType* preds[MAX_HEIGHT], + NodeType* succs[MAX_HEIGHT], + bool adding = true) { + NodeType *pred, *succ, *prevPred = nullptr; + bool valid = true; + for (int layer = 0; valid && layer < nodeHeight; ++layer) { + pred = preds[layer]; + DCHECK(pred != nullptr) << "layer=" << layer << " height=" << height() + << " nodeheight=" << nodeHeight; + succ = succs[layer]; + if (pred != prevPred) { + guards[layer] = pred->acquireGuard(); + prevPred = pred; + } + valid = !pred->markedForRemoval() && + pred->skip(layer) == succ; // check again after locking + + if (adding) { // when adding a node, the succ shouldn't be going away + valid = valid && (succ == nullptr || !succ->markedForRemoval()); + } + } + + return valid; + } + + // Returns a paired value: + // pair.first always stores the pointer to the node with the same input key. + // It could be either the newly added data, or the existed data in the + // list with the same key. + // pair.second stores whether the data is added successfully: + // 0 means not added, otherwise reutrns the new size. + template + std::pair addOrGetData(U&& data) { + NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT]; + NodeType* newNode; + size_t newSize; + while (true) { + int max_layer = 0; + int layer = findInsertionPointGetMaxLayer(data, preds, succs, &max_layer); + + if (layer >= 0) { + NodeType* nodeFound = succs[layer]; + DCHECK(nodeFound != nullptr); + if (nodeFound->markedForRemoval()) { + continue; // if it's getting deleted retry finding node. + } + // wait until fully linked. 
+ while (UNLIKELY(!nodeFound->fullyLinked())) { + } + return std::make_pair(nodeFound, 0); + } + + // need to capped at the original height -- the real height may have grown + int nodeHeight = + detail::SkipListRandomHeight::instance()->getHeight(max_layer + 1); + + ScopedLocker guards[MAX_HEIGHT]; + if (!lockNodesForChange(nodeHeight, guards, preds, succs)) { + continue; // give up the locks and retry until all valid + } + + // locks acquired and all valid, need to modify the links under the locks. + newNode = NodeType::create( + recycler_.alloc(), nodeHeight, std::forward(data)); + for (int k = 0; k < nodeHeight; ++k) { + newNode->setSkip(k, succs[k]); + preds[k]->setSkip(k, newNode); + } + + newNode->setFullyLinked(); + newSize = incrementSize(1); + break; + } + + int hgt = height(); + size_t sizeLimit = + detail::SkipListRandomHeight::instance()->getSizeLimit(hgt); + + if (hgt < MAX_HEIGHT && newSize > sizeLimit) { + growHeight(hgt + 1); + } + CHECK_GT(newSize, 0); + return std::make_pair(newNode, newSize); + } + + bool remove(const value_type& data) { + NodeType* nodeToDelete = nullptr; + ScopedLocker nodeGuard; + bool isMarked = false; + int nodeHeight = 0; + NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT]; + + while (true) { + int max_layer = 0; + int layer = findInsertionPointGetMaxLayer(data, preds, succs, &max_layer); + if (!isMarked && (layer < 0 || !okToDelete(succs[layer], layer))) { + return false; + } + + if (!isMarked) { + nodeToDelete = succs[layer]; + nodeHeight = nodeToDelete->height(); + nodeGuard = nodeToDelete->acquireGuard(); + if (nodeToDelete->markedForRemoval()) { + return false; + } + nodeToDelete->setMarkedForRemoval(); + isMarked = true; + } + + // acquire pred locks from bottom layer up + ScopedLocker guards[MAX_HEIGHT]; + if (!lockNodesForChange(nodeHeight, guards, preds, succs, false)) { + continue; // this will unlock all the locks + } + + for (int k = nodeHeight - 1; k >= 0; --k) { + preds[k]->setSkip(k, nodeToDelete->skip(k)); + } 
+ + incrementSize(-1); + break; + } + recycle(nodeToDelete); + return true; + } + + const value_type* first() const { + auto node = head_.load(std::memory_order_consume)->skip(0); + return node ? &node->data() : nullptr; + } + + const value_type* last() const { + NodeType* pred = head_.load(std::memory_order_consume); + NodeType* node = nullptr; + for (int layer = maxLayer(); layer >= 0; --layer) { + do { + node = pred->skip(layer); + if (node) { + pred = node; + } + } while (node != nullptr); + } + return pred == head_.load(std::memory_order_relaxed) ? nullptr + : &pred->data(); + } + + static bool okToDelete(NodeType* candidate, int layer) { + DCHECK(candidate != nullptr); + return candidate->fullyLinked() && candidate->maxLayer() == layer && + !candidate->markedForRemoval(); + } + + // find node for insertion/deleting + int findInsertionPointGetMaxLayer( + const value_type& data, + NodeType* preds[], + NodeType* succs[], + int* max_layer) const { + *max_layer = maxLayer(); + return findInsertionPoint( + head_.load(std::memory_order_consume), *max_layer, data, preds, succs); + } + + // Find node for access. Returns a paired values: + // pair.first = the first node that no-less than data value + // pair.second = 1 when the data value is founded, or 0 otherwise. + // This is like lower_bound, but not exact: we could have the node marked for + // removal so still need to check that. + std::pair findNode(const value_type& data) const { + return findNodeDownRight(data); + } + + // Find node by first stepping down then stepping right. Based on benchmark + // results, this is slightly faster than findNodeRightDown for better + // localality on the skipping pointers. 
+ std::pair findNodeDownRight(const value_type& data) const { + NodeType* pred = head_.load(std::memory_order_consume); + int ht = pred->height(); + NodeType* node = nullptr; + + bool found = false; + while (!found) { + // stepping down + for (; ht > 0 && less(data, node = pred->skip(ht - 1)); --ht) { + } + if (ht == 0) { + return std::make_pair(node, 0); // not found + } + // node <= data now, but we need to fix up ht + --ht; + + // stepping right + while (greater(data, node)) { + pred = node; + node = node->skip(ht); + } + found = !less(data, node); + } + return std::make_pair(node, found); + } + + // find node by first stepping right then stepping down. + // We still keep this for reference purposes. + std::pair findNodeRightDown(const value_type& data) const { + NodeType* pred = head_.load(std::memory_order_consume); + NodeType* node = nullptr; + auto top = maxLayer(); + int found = 0; + for (int layer = top; !found && layer >= 0; --layer) { + node = pred->skip(layer); + while (greater(data, node)) { + pred = node; + node = node->skip(layer); + } + found = !less(data, node); + } + return std::make_pair(node, found); + } + + NodeType* lower_bound(const value_type& data) const { + auto node = findNode(data).first; + while (node != nullptr && node->markedForRemoval()) { + node = node->skip(0); + } + return node; + } + + void growHeight(int height) { + NodeType* oldHead = head_.load(std::memory_order_consume); + if (oldHead->height() >= height) { // someone else already did this + return; + } + + NodeType* newHead = + NodeType::create(recycler_.alloc(), height, value_type(), true); + + { // need to guard the head node in case others are adding/removing + // nodes linked to the head. + ScopedLocker g = oldHead->acquireGuard(); + newHead->copyHead(oldHead); + NodeType* expected = oldHead; + if (!head_.compare_exchange_strong( + expected, newHead, std::memory_order_release)) { + // if someone has already done the swap, just return. 
+ NodeType::destroy(recycler_.alloc(), newHead); + return; + } + oldHead->setMarkedForRemoval(); + } + recycle(oldHead); + } + + void recycle(NodeType* node) { + recycler_.add(node); + } + + detail::NodeRecycler recycler_; + std::atomic head_; + std::atomic size_; +}; + +template +class ConcurrentSkipList::Accessor { + typedef detail::SkipListNode NodeType; + typedef ConcurrentSkipList SkipListType; + + public: + typedef T value_type; + typedef T key_type; + typedef T& reference; + typedef T* pointer; + typedef const T& const_reference; + typedef const T* const_pointer; + typedef size_t size_type; + typedef Comp key_compare; + typedef Comp value_compare; + + typedef typename SkipListType::iterator iterator; + typedef typename SkipListType::const_iterator const_iterator; + typedef typename SkipListType::Skipper Skipper; + + explicit Accessor(std::shared_ptr skip_list) + : slHolder_(std::move(skip_list)) { + sl_ = slHolder_.get(); + DCHECK(sl_ != nullptr); + sl_->recycler_.addRef(); + } + + // Unsafe initializer: the caller assumes the responsibility to keep + // skip_list valid during the whole life cycle of the Acessor. 
+ explicit Accessor(ConcurrentSkipList* skip_list) : sl_(skip_list) { + DCHECK(sl_ != nullptr); + sl_->recycler_.addRef(); + } + + Accessor(const Accessor& accessor) + : sl_(accessor.sl_), slHolder_(accessor.slHolder_) { + sl_->recycler_.addRef(); + } + + Accessor& operator=(const Accessor& accessor) { + if (this != &accessor) { + slHolder_ = accessor.slHolder_; + sl_->recycler_.releaseRef(); + sl_ = accessor.sl_; + sl_->recycler_.addRef(); + } + return *this; + } + + ~Accessor() { + sl_->recycler_.releaseRef(); + } + + bool empty() const { + return sl_->size() == 0; + } + size_t size() const { + return sl_->size(); + } + size_type max_size() const { + return std::numeric_limits::max(); + } + + // returns end() if the value is not in the list, otherwise returns an + // iterator pointing to the data, and it's guaranteed that the data is valid + // as far as the Accessor is hold. + iterator find(const key_type& value) { + return iterator(sl_->find(value)); + } + const_iterator find(const key_type& value) const { + return iterator(sl_->find(value)); + } + size_type count(const key_type& data) const { + return contains(data); + } + + iterator begin() const { + NodeType* head = sl_->head_.load(std::memory_order_consume); + return iterator(head->next()); + } + iterator end() const { + return iterator(nullptr); + } + const_iterator cbegin() const { + return begin(); + } + const_iterator cend() const { + return end(); + } + + template < + typename U, + typename = + typename std::enable_if::value>::type> + std::pair insert(U&& data) { + auto ret = sl_->addOrGetData(std::forward(data)); + return std::make_pair(iterator(ret.first), ret.second); + } + size_t erase(const key_type& data) { + return remove(data); + } + + iterator lower_bound(const key_type& data) const { + return iterator(sl_->lower_bound(data)); + } + + size_t height() const { + return sl_->height(); + } + + // first() returns pointer to the first element in the skiplist, or + // nullptr if empty. 
+ // + // last() returns the pointer to the last element in the skiplist, + // nullptr if list is empty. + // + // Note: As concurrent writing can happen, first() is not + // guaranteed to be the min_element() in the list. Similarly + // last() is not guaranteed to be the max_element(), and both of them can + // be invalid (i.e. nullptr), so we name them differently from front() and + // tail() here. + const key_type* first() const { + return sl_->first(); + } + const key_type* last() const { + return sl_->last(); + } + + // Try to remove the last element in the skip list. + // + // Returns true if we removed it, false if either the list is empty + // or a race condition happened (i.e. the used-to-be last element + // was already removed by another thread). + bool pop_back() { + auto last = sl_->last(); + return last ? sl_->remove(*last) : false; + } + + std::pair addOrGetData(const key_type& data) { + auto ret = sl_->addOrGetData(data); + return std::make_pair(&ret.first->data(), ret.second); + } + + SkipListType* skiplist() const { + return sl_; + } + + // legacy interfaces + // TODO:(xliu) remove these. + // Returns true if the node is added successfully, false if not, i.e. the + // node with the same key already existed in the list. + bool contains(const key_type& data) const { + return sl_->find(data); + } + bool add(const key_type& data) { + return sl_->addOrGetData(data).second; + } + bool remove(const key_type& data) { + return sl_->remove(data); + } + + private: + SkipListType* sl_; + std::shared_ptr slHolder_; +}; + +// implements forward iterator concept. 
+template +class detail::csl_iterator : public detail::IteratorFacade< + csl_iterator, + ValT, + std::forward_iterator_tag> { + public: + typedef ValT value_type; + typedef value_type& reference; + typedef value_type* pointer; + typedef ptrdiff_t difference_type; + + explicit csl_iterator(NodeT* node = nullptr) : node_(node) {} + + template + csl_iterator( + const csl_iterator& other, + typename std::enable_if< + std::is_convertible::value>::type* = nullptr) + : node_(other.node_) {} + + size_t nodeSize() const { + return node_ == nullptr ? 0 + : node_->height() * sizeof(NodeT*) + sizeof(*this); + } + + bool good() const { + return node_ != nullptr; + } + + private: + template + friend class csl_iterator; + friend class detail:: + IteratorFacade; + + void increment() { + node_ = node_->next(); + } + bool equal(const csl_iterator& other) const { + return node_ == other.node_; + } + value_type& dereference() const { + return node_->data(); + } + + NodeT* node_; +}; + +// Skipper interface +template +class ConcurrentSkipList::Skipper { + typedef detail::SkipListNode NodeType; + typedef ConcurrentSkipList SkipListType; + typedef typename SkipListType::Accessor Accessor; + + public: + typedef T value_type; + typedef T& reference; + typedef T* pointer; + typedef ptrdiff_t difference_type; + + Skipper(const std::shared_ptr& skipList) : accessor_(skipList) { + init(); + } + + Skipper(const Accessor& accessor) : accessor_(accessor) { + init(); + } + + void init() { + // need to cache the head node + NodeType* head_node = head(); + headHeight_ = head_node->height(); + for (int i = 0; i < headHeight_; ++i) { + preds_[i] = head_node; + succs_[i] = head_node->skip(i); + } + int max_layer = maxLayer(); + for (int i = 0; i < max_layer; ++i) { + hints_[i] = uint8_t(i + 1); + } + hints_[max_layer] = max_layer; + } + + // advance to the next node in the list. 
+ Skipper& operator++() { + preds_[0] = succs_[0]; + succs_[0] = preds_[0]->skip(0); + int height = curHeight(); + for (int i = 1; i < height && preds_[0] == succs_[i]; ++i) { + preds_[i] = succs_[i]; + succs_[i] = preds_[i]->skip(i); + } + return *this; + } + + bool good() const { + return succs_[0] != nullptr; + } + + int maxLayer() const { + return headHeight_ - 1; + } + + int curHeight() const { + // need to cap the height to the cached head height, as the current node + // might be some newly inserted node and also during the time period the + // head height may have grown. + return succs_[0] ? std::min(headHeight_, succs_[0]->height()) : 0; + } + + const value_type& data() const { + DCHECK(succs_[0] != nullptr); + return succs_[0]->data(); + } + + value_type& operator*() const { + DCHECK(succs_[0] != nullptr); + return succs_[0]->data(); + } + + value_type* operator->() { + DCHECK(succs_[0] != nullptr); + return &succs_[0]->data(); + } + + /* + * Skip to the position whose data is no less than the parameter. + * (I.e. the lower_bound). + * + * Returns true if the data is found, false otherwise. 
+ */ + bool to(const value_type& data) { + int layer = curHeight() - 1; + if (layer < 0) { + return false; // reaches the end of the list + } + + int lyr = hints_[layer]; + int max_layer = maxLayer(); + while (SkipListType::greater(data, succs_[lyr]) && lyr < max_layer) { + ++lyr; + } + hints_[layer] = lyr; // update the hint + + int foundLayer = SkipListType::findInsertionPoint( + preds_[lyr], lyr, data, preds_, succs_); + if (foundLayer < 0) { + return false; + } + + DCHECK(succs_[0] != nullptr) + << "lyr=" << lyr << "; max_layer=" << max_layer; + return !succs_[0]->markedForRemoval(); + } + + private: + NodeType* head() const { + return accessor_.skiplist()->head_.load(std::memory_order_consume); + } + + Accessor accessor_; + int headHeight_; + NodeType *succs_[MAX_HEIGHT], *preds_[MAX_HEIGHT]; + uint8_t hints_[MAX_HEIGHT]; +}; + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ConstexprMath.h b/native/iosTest/Pods/Folly/folly/ConstexprMath.h new file mode 100644 index 000000000..4a70ff6cd --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ConstexprMath.h @@ -0,0 +1,383 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include +#include + +namespace folly { +// TLDR: Prefer using operator< for ordering. And when +// a and b are equivalent objects, we return b to make +// sorting stable. 
+// See http://stepanovpapers.com/notes.pdf for details. +template +constexpr T constexpr_max(T a) { + return a; +} +template +constexpr T constexpr_max(T a, T b, Ts... ts) { + return b < a ? constexpr_max(a, ts...) : constexpr_max(b, ts...); +} + +// When a and b are equivalent objects, we return a to +// make sorting stable. +template +constexpr T constexpr_min(T a) { + return a; +} +template +constexpr T constexpr_min(T a, T b, Ts... ts) { + return b < a ? constexpr_min(b, ts...) : constexpr_min(a, ts...); +} + +template +constexpr T const& +constexpr_clamp(T const& v, T const& lo, T const& hi, Less less) { + return less(v, lo) ? lo : less(hi, v) ? hi : v; +} +template +constexpr T const& constexpr_clamp(T const& v, T const& lo, T const& hi) { + return constexpr_clamp(v, lo, hi, std::less{}); +} + +namespace detail { + +template +struct constexpr_abs_helper {}; + +template +struct constexpr_abs_helper< + T, + typename std::enable_if::value>::type> { + static constexpr T go(T t) { + return t < static_cast(0) ? -t : t; + } +}; + +template +struct constexpr_abs_helper< + T, + typename std::enable_if< + std::is_integral::value && !std::is_same::value && + std::is_unsigned::value>::type> { + static constexpr T go(T t) { + return t; + } +}; + +template +struct constexpr_abs_helper< + T, + typename std::enable_if< + std::is_integral::value && !std::is_same::value && + std::is_signed::value>::type> { + static constexpr typename std::make_unsigned::type go(T t) { + return typename std::make_unsigned::type(t < static_cast(0) ? -t : t); + } +}; +} // namespace detail + +template +constexpr auto constexpr_abs(T t) + -> decltype(detail::constexpr_abs_helper::go(t)) { + return detail::constexpr_abs_helper::go(t); +} + +namespace detail { +template +constexpr T constexpr_log2_(T a, T e) { + return e == T(1) ? a : constexpr_log2_(a + T(1), e / T(2)); +} + +template +constexpr T constexpr_log2_ceil_(T l2, T t) { + return l2 + T(T(1) << l2 < t ? 
1 : 0); +} + +template +constexpr T constexpr_square_(T t) { + return t * t; +} +} // namespace detail + +template +constexpr T constexpr_log2(T t) { + return detail::constexpr_log2_(T(0), t); +} + +template +constexpr T constexpr_log2_ceil(T t) { + return detail::constexpr_log2_ceil_(constexpr_log2(t), t); +} + +template +constexpr T constexpr_ceil(T t, T round) { + return round == T(0) + ? t + : ((t + (t < T(0) ? T(0) : round - T(1))) / round) * round; +} + +template +constexpr T constexpr_pow(T base, std::size_t exp) { + return exp == 0 + ? T(1) + : exp == 1 ? base + : detail::constexpr_square_(constexpr_pow(base, exp / 2)) * + (exp % 2 ? base : T(1)); +} + +/// constexpr_find_last_set +/// +/// Return the 1-based index of the most significant bit which is set. +/// For x > 0, constexpr_find_last_set(x) == 1 + floor(log2(x)). +template +constexpr std::size_t constexpr_find_last_set(T const t) { + using U = std::make_unsigned_t; + return t == T(0) ? 0 : 1 + constexpr_log2(static_cast(t)); +} + +namespace detail { +template +constexpr std::size_t +constexpr_find_first_set_(std::size_t s, std::size_t a, U const u) { + return s == 0 ? a + : constexpr_find_first_set_( + s / 2, a + s * bool((u >> a) % (U(1) << s) == U(0)), u); +} +} // namespace detail + +/// constexpr_find_first_set +/// +/// Return the 1-based index of the least significant bit which is set. +/// For x > 0, the exponent in the largest power of two which does not divide x. +template +constexpr std::size_t constexpr_find_first_set(T t) { + using U = std::make_unsigned_t; + using size = std::integral_constant; + return t == T(0) + ? 
0 + : 1 + detail::constexpr_find_first_set_(size{}, 0, static_cast(t)); +} + +template +constexpr T constexpr_add_overflow_clamped(T a, T b) { + using L = std::numeric_limits; + using M = std::intmax_t; + static_assert( + !std::is_integral::value || sizeof(T) <= sizeof(M), + "Integral type too large!"); + // clang-format off + return + // don't do anything special for non-integral types. + !std::is_integral::value ? a + b : + // for narrow integral types, just convert to intmax_t. + sizeof(T) < sizeof(M) + ? T(constexpr_clamp(M(a) + M(b), M(L::min()), M(L::max()))) : + // when a >= 0, cannot add more than `MAX - a` onto a. + !(a < 0) ? a + constexpr_min(b, T(L::max() - a)) : + // a < 0 && b >= 0, `a + b` will always be in valid range of type T. + !(b < 0) ? a + b : + // a < 0 && b < 0, keep the result >= MIN. + a + constexpr_max(b, T(L::min() - a)); + // clang-format on +} + +template +constexpr T constexpr_sub_overflow_clamped(T a, T b) { + using L = std::numeric_limits; + using M = std::intmax_t; + static_assert( + !std::is_integral::value || sizeof(T) <= sizeof(M), + "Integral type too large!"); + // clang-format off + return + // don't do anything special for non-integral types. + !std::is_integral::value ? a - b : + // for unsigned type, keep result >= 0. + std::is_unsigned::value ? (a < b ? 0 : a - b) : + // for narrow signed integral types, just convert to intmax_t. + sizeof(T) < sizeof(M) + ? T(constexpr_clamp(M(a) - M(b), M(L::min()), M(L::max()))) : + // (a >= 0 && b >= 0) || (a < 0 && b < 0), `a - b` will always be valid. + (a < 0) == (b < 0) ? a - b : + // MIN < b, so `-b` should be in valid range (-MAX <= -b <= MAX), + // convert subtraction to addition. + L::min() < b ? constexpr_add_overflow_clamped(a, T(-b)) : + // -b = -MIN = (MAX + 1) and a <= -1, result is in valid range. + a < 0 ? a - b : + // -b = -MIN = (MAX + 1) and a >= 0, result > MAX. 
+ L::max(); + // clang-format on +} + +// clamp_cast<> provides sane numeric conversions from float point numbers to +// integral numbers, and between different types of integral numbers. It helps +// to avoid unexpected bugs introduced by bad conversion, and undefined behavior +// like overflow when casting float point numbers to integral numbers. +// +// When doing clamp_cast(value), if `value` is in valid range of Dst, +// it will give correct result in Dst, equal to `value`. +// +// If `value` is outside the representable range of Dst, it will be clamped to +// MAX or MIN in Dst, instead of being undefined behavior. +// +// Float NaNs are converted to 0 in integral type. +// +// Here's some comparision with static_cast<>: +// (with FB-internal gcc-5-glibc-2.23 toolchain) +// +// static_cast(NaN) = 6 +// clamp_cast(NaN) = 0 +// +// static_cast(9999999999.0f) = -348639895 +// clamp_cast(9999999999.0f) = 2147483647 +// +// static_cast(2147483647.0f) = -348639895 +// clamp_cast(2147483647.0f) = 2147483647 +// +// static_cast(4294967295.0f) = 0 +// clamp_cast(4294967295.0f) = 4294967295 +// +// static_cast(-1) = 4294967295 +// clamp_cast(-1) = 0 +// +// static_cast(32768u) = -32768 +// clamp_cast(32768u) = 32767 + +template +constexpr typename std::enable_if::value, Dst>::type +constexpr_clamp_cast(Src src) { + static_assert( + std::is_integral::value && sizeof(Dst) <= sizeof(int64_t), + "constexpr_clamp_cast can only cast into integral type (up to 64bit)"); + + using L = std::numeric_limits; + // clang-format off + return + // Check if Src and Dst have same signedness. + std::is_signed::value == std::is_signed::value + ? ( + // Src and Dst have same signedness. If sizeof(Src) <= sizeof(Dst), + // we can safely convert Src to Dst without any loss of accuracy. + sizeof(Src) <= sizeof(Dst) ? Dst(src) : + // If Src is larger in size, we need to clamp it to valid range in Dst. 
+ Dst(constexpr_clamp(src, Src(L::min()), Src(L::max())))) + // Src and Dst have different signedness. + // Check if it's signed -> unsigend cast. + : std::is_signed::value && std::is_unsigned::value + ? ( + // If src < 0, the result should be 0. + src < 0 ? Dst(0) : + // Otherwise, src >= 0. If src can fit into Dst, we can safely cast it + // without loss of accuracy. + sizeof(Src) <= sizeof(Dst) ? Dst(src) : + // If Src is larger in size than Dst, we need to ensure the result is + // at most Dst MAX. + Dst(constexpr_min(src, Src(L::max())))) + // It's unsigned -> signed cast. + : ( + // Since Src is unsigned, and Dst is signed, Src can fit into Dst only + // when sizeof(Src) < sizeof(Dst). + sizeof(Src) < sizeof(Dst) ? Dst(src) : + // If Src does not fit into Dst, we need to ensure the result is at most + // Dst MAX. + Dst(constexpr_min(src, Src(L::max())))); + // clang-format on +} + +namespace detail { +// Upper/lower bound values that could be accurately represented in both +// integral and float point types. +constexpr double kClampCastLowerBoundDoubleToInt64F = -9223372036854774784.0; +constexpr double kClampCastUpperBoundDoubleToInt64F = 9223372036854774784.0; +constexpr double kClampCastUpperBoundDoubleToUInt64F = 18446744073709549568.0; + +constexpr float kClampCastLowerBoundFloatToInt32F = -2147483520.0f; +constexpr float kClampCastUpperBoundFloatToInt32F = 2147483520.0f; +constexpr float kClampCastUpperBoundFloatToUInt32F = 4294967040.0f; + +// This works the same as constexpr_clamp, but the comparision are done in Src +// to prevent any implicit promotions. +template +constexpr D constexpr_clamp_cast_helper(S src, S sl, S su, D dl, D du) { + return src < sl ? dl : (src > su ? 
du : D(src)); +} +} // namespace detail + +template +constexpr typename std::enable_if::value, Dst>::type +constexpr_clamp_cast(Src src) { + static_assert( + std::is_integral::value && sizeof(Dst) <= sizeof(int64_t), + "constexpr_clamp_cast can only cast into integral type (up to 64bit)"); + + using L = std::numeric_limits; + // clang-format off + return + // Special case: cast NaN into 0. + // Using a trick here to portably check for NaN: f != f only if f is NaN. + // see: https://stackoverflow.com/a/570694 + (src != src) ? Dst(0) : + // using `sizeof(Src) > sizeof(Dst)` as a heuristic that Dst can be + // represented in Src without loss of accuracy. + // see: https://en.wikipedia.org/wiki/Floating-point_arithmetic + sizeof(Src) > sizeof(Dst) ? + detail::constexpr_clamp_cast_helper( + src, Src(L::min()), Src(L::max()), L::min(), L::max()) : + // sizeof(Src) < sizeof(Dst) only happens when doing cast of + // 32bit float -> u/int64_t. + // Losslessly promote float into double, change into double -> u/int64_t. + sizeof(Src) < sizeof(Dst) ? ( + src >= 0.0 + ? constexpr_clamp_cast( + constexpr_clamp_cast(double(src))) + : constexpr_clamp_cast( + constexpr_clamp_cast(double(src)))) : + // The following are for sizeof(Src) == sizeof(Dst). + std::is_same::value && std::is_same::value ? + detail::constexpr_clamp_cast_helper( + double(src), + detail::kClampCastLowerBoundDoubleToInt64F, + detail::kClampCastUpperBoundDoubleToInt64F, + L::min(), + L::max()) : + std::is_same::value && std::is_same::value ? + detail::constexpr_clamp_cast_helper( + double(src), + 0.0, + detail::kClampCastUpperBoundDoubleToUInt64F, + L::min(), + L::max()) : + std::is_same::value && std::is_same::value ? 
+ detail::constexpr_clamp_cast_helper( + float(src), + detail::kClampCastLowerBoundFloatToInt32F, + detail::kClampCastUpperBoundFloatToInt32F, + L::min(), + L::max()) : + detail::constexpr_clamp_cast_helper( + float(src), + 0.0f, + detail::kClampCastUpperBoundFloatToUInt32F, + L::min(), + L::max()); + // clang-format on +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/Conv.cpp b/native/iosTest/Pods/Folly/folly/Conv.cpp new file mode 100644 index 000000000..d74df582f --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Conv.cpp @@ -0,0 +1,790 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +namespace folly { +namespace detail { + +namespace { + +/** + * Finds the first non-digit in a string. The number of digits + * searched depends on the precision of the Tgt integral. Assumes the + * string starts with NO whitespace and NO sign. + * + * The semantics of the routine is: + * for (;; ++b) { + * if (b >= e || !isdigit(*b)) return b; + * } + * + * Complete unrolling marks bottom-line (i.e. entire conversion) + * improvements of 20%. 
+ */ +inline const char* findFirstNonDigit(const char* b, const char* e) { + for (; b < e; ++b) { + auto const c = static_cast(*b) - '0'; + if (c >= 10) { + break; + } + } + return b; +} + +// Maximum value of number when represented as a string +template +struct MaxString { + static const char* const value; +}; + +template <> +const char* const MaxString::value = "255"; +template <> +const char* const MaxString::value = "65535"; +template <> +const char* const MaxString::value = "4294967295"; +#if __SIZEOF_LONG__ == 4 +template <> +const char* const MaxString::value = "4294967295"; +#else +template <> +const char* const MaxString::value = "18446744073709551615"; +#endif +static_assert( + sizeof(unsigned long) >= 4, + "Wrong value for MaxString::value," + " please update."); +template <> +const char* const MaxString::value = "18446744073709551615"; +static_assert( + sizeof(unsigned long long) >= 8, + "Wrong value for MaxString::value" + ", please update."); + +#if FOLLY_HAVE_INT128_T +template <> +const char* const MaxString<__uint128_t>::value = + "340282366920938463463374607431768211455"; +#endif + +/* + * Lookup tables that converts from a decimal character value to an integral + * binary value, shifted by a decimal "shift" multiplier. + * For all character values in the range '0'..'9', the table at those + * index locations returns the actual decimal value shifted by the multiplier. + * For all other values, the lookup table returns an invalid OOR value. + */ +// Out-of-range flag value, larger than the largest value that can fit in +// four decimal bytes (9999), but four of these added up together should +// still not overflow uint16_t. 
+constexpr int32_t OOR = 10000; + +alignas(16) constexpr uint16_t shift1[] = { + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 1, // 40 + 2, 3, 4, 5, 6, 7, 8, 9, OOR, OOR, + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240 + OOR, OOR, OOR, OOR, OOR, OOR // 250 +}; + +alignas(16) constexpr uint16_t shift10[] = { + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 10, // 40 + 20, 30, 40, 50, 60, 70, 80, 90, OOR, OOR, + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60 
+ OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240 + OOR, OOR, OOR, OOR, OOR, OOR // 250 +}; + +alignas(16) constexpr uint16_t shift100[] = { + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 100, // 40 + 200, 300, 400, 500, 600, 700, 800, 900, OOR, OOR, + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, 
OOR, OOR, OOR, // 140 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240 + OOR, OOR, OOR, OOR, OOR, OOR // 250 +}; + +alignas(16) constexpr uint16_t shift1000[] = { + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 1000, // 40 + 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, OOR, OOR, + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210 + 
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230 + OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240 + OOR, OOR, OOR, OOR, OOR, OOR // 250 +}; + +struct ErrorString { + const char* string; + bool quote; +}; + +// Keep this in sync with ConversionCode in Conv.h +constexpr const std::array< + ErrorString, + static_cast(ConversionCode::NUM_ERROR_CODES)> + kErrorStrings{{ + {"Success", true}, + {"Empty input string", true}, + {"No digits found in input string", true}, + {"Integer overflow when parsing bool (must be 0 or 1)", true}, + {"Invalid value for bool", true}, + {"Non-digit character found", true}, + {"Invalid leading character", true}, + {"Overflow during conversion", true}, + {"Negative overflow during conversion", true}, + {"Unable to convert string to floating point value", true}, + {"Non-whitespace character found after end of conversion", true}, + {"Overflow during arithmetic conversion", false}, + {"Negative overflow during arithmetic conversion", false}, + {"Loss of precision during arithmetic conversion", false}, + }}; + +// Check if ASCII is really ASCII +using IsAscii = + bool_constant<'A' == 65 && 'Z' == 90 && 'a' == 97 && 'z' == 122>; + +// The code in this file that uses tolower() really only cares about +// 7-bit ASCII characters, so we can take a nice shortcut here. +inline char tolower_ascii(char in) { + return IsAscii::value ? 
in | 0x20 : char(std::tolower(in)); +} + +inline bool bool_str_cmp(const char** b, size_t len, const char* value) { + // Can't use strncasecmp, since we want to ensure that the full value matches + const char* p = *b; + const char* e = *b + len; + const char* v = value; + while (*v != '\0') { + if (p == e || tolower_ascii(*p) != *v) { // value is already lowercase + return false; + } + ++p; + ++v; + } + + *b = p; + return true; +} + +} // namespace + +Expected str_to_bool(StringPiece* src) noexcept { + auto b = src->begin(), e = src->end(); + for (;; ++b) { + if (b >= e) { + return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING); + } + if (!std::isspace(*b)) { + break; + } + } + + bool result; + auto len = size_t(e - b); + switch (*b) { + case '0': + case '1': { + result = false; + for (; b < e && isdigit(*b); ++b) { + if (result || (*b != '0' && *b != '1')) { + return makeUnexpected(ConversionCode::BOOL_OVERFLOW); + } + result = (*b == '1'); + } + break; + } + case 'y': + case 'Y': + result = true; + if (!bool_str_cmp(&b, len, "yes")) { + ++b; // accept the single 'y' character + } + break; + case 'n': + case 'N': + result = false; + if (!bool_str_cmp(&b, len, "no")) { + ++b; + } + break; + case 't': + case 'T': + result = true; + if (!bool_str_cmp(&b, len, "true")) { + ++b; + } + break; + case 'f': + case 'F': + result = false; + if (!bool_str_cmp(&b, len, "false")) { + ++b; + } + break; + case 'o': + case 'O': + if (bool_str_cmp(&b, len, "on")) { + result = true; + } else if (bool_str_cmp(&b, len, "off")) { + result = false; + } else { + return makeUnexpected(ConversionCode::BOOL_INVALID_VALUE); + } + break; + default: + return makeUnexpected(ConversionCode::BOOL_INVALID_VALUE); + } + + src->assign(b, e); + + return result; +} + +/** + * StringPiece to double, with progress information. Alters the + * StringPiece parameter to munch the already-parsed characters. 
+ */ +template +Expected str_to_floating(StringPiece* src) noexcept { + using namespace double_conversion; + static StringToDoubleConverter conv( + StringToDoubleConverter::ALLOW_TRAILING_JUNK | + StringToDoubleConverter::ALLOW_LEADING_SPACES, + 0.0, + // return this for junk input string + std::numeric_limits::quiet_NaN(), + nullptr, + nullptr); + + if (src->empty()) { + return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING); + } + + int length; + auto result = conv.StringToDouble( + src->data(), + static_cast(src->size()), + &length); // processed char count + + if (!std::isnan(result)) { + // If we get here with length = 0, the input string is empty. + // If we get here with result = 0.0, it's either because the string + // contained only whitespace, or because we had an actual zero value + // (with potential trailing junk). If it was only whitespace, we + // want to raise an error; length will point past the last character + // that was processed, so we need to check if that character was + // whitespace or not. + if (length == 0 || + (result == 0.0 && std::isspace((*src)[size_t(length) - 1]))) { + return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING); + } + if (length >= 2) { + const char* suffix = src->data() + length - 1; + // double_conversion doesn't update length correctly when there is an + // incomplete exponent specifier. Converting "12e-f-g" shouldn't consume + // any more than "12", but it will consume "12e-". 
+ + // "123-" should only parse "123" + if (*suffix == '-' || *suffix == '+') { + --suffix; + --length; + } + // "12e-f-g" or "12euro" should only parse "12" + if (*suffix == 'e' || *suffix == 'E') { + --length; + } + } + src->advance(size_t(length)); + return Tgt(result); + } + + auto* e = src->end(); + auto* b = + std::find_if_not(src->begin(), e, [](char c) { return std::isspace(c); }); + + // There must be non-whitespace, otherwise we would have caught this above + assert(b < e); + auto size = size_t(e - b); + + bool negative = false; + if (*b == '-') { + negative = true; + ++b; + --size; + } + + result = 0.0; + + switch (tolower_ascii(*b)) { + case 'i': + if (size >= 3 && tolower_ascii(b[1]) == 'n' && + tolower_ascii(b[2]) == 'f') { + if (size >= 8 && tolower_ascii(b[3]) == 'i' && + tolower_ascii(b[4]) == 'n' && tolower_ascii(b[5]) == 'i' && + tolower_ascii(b[6]) == 't' && tolower_ascii(b[7]) == 'y') { + b += 8; + } else { + b += 3; + } + result = std::numeric_limits::infinity(); + } + break; + + case 'n': + if (size >= 3 && tolower_ascii(b[1]) == 'a' && + tolower_ascii(b[2]) == 'n') { + b += 3; + result = std::numeric_limits::quiet_NaN(); + } + break; + + default: + break; + } + + if (result == 0.0) { + // All bets are off + return makeUnexpected(ConversionCode::STRING_TO_FLOAT_ERROR); + } + + if (negative) { + result = -result; + } + + src->assign(b, e); + + return Tgt(result); +} + +template Expected str_to_floating( + StringPiece* src) noexcept; +template Expected str_to_floating( + StringPiece* src) noexcept; + +/** + * This class takes care of additional processing needed for signed values, + * like leading sign character and overflow checks. 
+ */ +template ::value> +class SignedValueHandler; + +template +class SignedValueHandler { + public: + ConversionCode init(const char*& b) { + negative_ = false; + if (!std::isdigit(*b)) { + if (*b == '-') { + negative_ = true; + } else if (UNLIKELY(*b != '+')) { + return ConversionCode::INVALID_LEADING_CHAR; + } + ++b; + } + return ConversionCode::SUCCESS; + } + + ConversionCode overflow() { + return negative_ ? ConversionCode::NEGATIVE_OVERFLOW + : ConversionCode::POSITIVE_OVERFLOW; + } + + template + Expected finalize(U value) { + T rv; + if (negative_) { + rv = T(-value); + if (UNLIKELY(rv > 0)) { + return makeUnexpected(ConversionCode::NEGATIVE_OVERFLOW); + } + } else { + rv = T(value); + if (UNLIKELY(rv < 0)) { + return makeUnexpected(ConversionCode::POSITIVE_OVERFLOW); + } + } + return rv; + } + + private: + bool negative_; +}; + +// For unsigned types, we don't need any extra processing +template +class SignedValueHandler { + public: + ConversionCode init(const char*&) { + return ConversionCode::SUCCESS; + } + + ConversionCode overflow() { + return ConversionCode::POSITIVE_OVERFLOW; + } + + Expected finalize(T value) { + return value; + } +}; + +/** + * String represented as a pair of pointers to char to signed/unsigned + * integrals. Assumes NO whitespace before or after, and also that the + * string is composed entirely of digits (and an optional sign only for + * signed types). String may be empty, in which case digits_to returns + * an appropriate error. + */ +template +inline Expected digits_to( + const char* b, + const char* const e) noexcept { + using UT = typename std::make_unsigned::type; + assert(b <= e); + + SignedValueHandler sgn; + + auto err = sgn.init(b); + if (UNLIKELY(err != ConversionCode::SUCCESS)) { + return makeUnexpected(err); + } + + auto size = size_t(e - b); + + /* Although the string is entirely made of digits, we still need to + * check for overflow. + */ + if (size > std::numeric_limits::digits10) { + // Leading zeros? 
+ if (b < e && *b == '0') { + for (++b;; ++b) { + if (b == e) { + return Tgt(0); // just zeros, e.g. "0000" + } + if (*b != '0') { + size = size_t(e - b); + break; + } + } + } + if (size > std::numeric_limits::digits10 && + (size != std::numeric_limits::digits10 + 1 || + strncmp(b, MaxString::value, size) > 0)) { + return makeUnexpected(sgn.overflow()); + } + } + + // Here we know that the number won't overflow when + // converted. Proceed without checks. + + UT result = 0; + + for (; e - b >= 4; b += 4) { + result *= static_cast(10000); + const int32_t r0 = shift1000[static_cast(b[0])]; + const int32_t r1 = shift100[static_cast(b[1])]; + const int32_t r2 = shift10[static_cast(b[2])]; + const int32_t r3 = shift1[static_cast(b[3])]; + const auto sum = r0 + r1 + r2 + r3; + if (sum >= OOR) { + goto outOfRange; + } + result += UT(sum); + } + + switch (e - b) { + case 3: { + const int32_t r0 = shift100[static_cast(b[0])]; + const int32_t r1 = shift10[static_cast(b[1])]; + const int32_t r2 = shift1[static_cast(b[2])]; + const auto sum = r0 + r1 + r2; + if (sum >= OOR) { + goto outOfRange; + } + result = UT(1000 * result + sum); + break; + } + case 2: { + const int32_t r0 = shift10[static_cast(b[0])]; + const int32_t r1 = shift1[static_cast(b[1])]; + const auto sum = r0 + r1; + if (sum >= OOR) { + goto outOfRange; + } + result = UT(100 * result + sum); + break; + } + case 1: { + const int32_t sum = shift1[static_cast(b[0])]; + if (sum >= OOR) { + goto outOfRange; + } + result = UT(10 * result + sum); + break; + } + default: + assert(b == e); + if (size == 0) { + return makeUnexpected(ConversionCode::NO_DIGITS); + } + break; + } + + return sgn.finalize(result); + +outOfRange: + return makeUnexpected(ConversionCode::NON_DIGIT_CHAR); +} + +template Expected digits_to( + const char*, + const char*) noexcept; +template Expected digits_to( + const char*, + const char*) noexcept; +template Expected digits_to( + const char*, + const char*) noexcept; + +template Expected 
digits_to( + const char*, + const char*) noexcept; +template Expected digits_to( + const char*, + const char*) noexcept; + +template Expected digits_to( + const char*, + const char*) noexcept; +template Expected digits_to( + const char*, + const char*) noexcept; + +template Expected digits_to( + const char*, + const char*) noexcept; +template Expected digits_to( + const char*, + const char*) noexcept; + +template Expected digits_to( + const char*, + const char*) noexcept; +template Expected +digits_to(const char*, const char*) noexcept; + +#if FOLLY_HAVE_INT128_T +template Expected<__int128, ConversionCode> digits_to<__int128>( + const char*, + const char*) noexcept; +template Expected +digits_to(const char*, const char*) noexcept; +#endif + +/** + * StringPiece to integrals, with progress information. Alters the + * StringPiece parameter to munch the already-parsed characters. + */ +template +Expected str_to_integral(StringPiece* src) noexcept { + using UT = typename std::make_unsigned::type; + + auto b = src->data(), past = src->data() + src->size(); + + for (;; ++b) { + if (UNLIKELY(b >= past)) { + return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING); + } + if (!std::isspace(*b)) { + break; + } + } + + SignedValueHandler sgn; + auto err = sgn.init(b); + + if (UNLIKELY(err != ConversionCode::SUCCESS)) { + return makeUnexpected(err); + } + if (std::is_signed::value && UNLIKELY(b >= past)) { + return makeUnexpected(ConversionCode::NO_DIGITS); + } + if (UNLIKELY(!isdigit(*b))) { + return makeUnexpected(ConversionCode::NON_DIGIT_CHAR); + } + + auto m = findFirstNonDigit(b + 1, past); + + auto tmp = digits_to(b, m); + + if (UNLIKELY(!tmp.hasValue())) { + return makeUnexpected( + tmp.error() == ConversionCode::POSITIVE_OVERFLOW ? 
sgn.overflow() + : tmp.error()); + } + + auto res = sgn.finalize(tmp.value()); + + if (res.hasValue()) { + src->advance(size_t(m - src->data())); + } + + return res; +} + +template Expected str_to_integral( + StringPiece* src) noexcept; +template Expected str_to_integral( + StringPiece* src) noexcept; +template Expected str_to_integral( + StringPiece* src) noexcept; + +template Expected str_to_integral( + StringPiece* src) noexcept; +template Expected +str_to_integral(StringPiece* src) noexcept; + +template Expected str_to_integral( + StringPiece* src) noexcept; +template Expected str_to_integral( + StringPiece* src) noexcept; + +template Expected str_to_integral( + StringPiece* src) noexcept; +template Expected str_to_integral( + StringPiece* src) noexcept; + +template Expected str_to_integral( + StringPiece* src) noexcept; +template Expected +str_to_integral(StringPiece* src) noexcept; + +#if FOLLY_HAVE_INT128_T +template Expected<__int128, ConversionCode> str_to_integral<__int128>( + StringPiece* src) noexcept; +template Expected +str_to_integral(StringPiece* src) noexcept; +#endif + +} // namespace detail + +ConversionError makeConversionError(ConversionCode code, StringPiece input) { + using namespace detail; + static_assert( + std::is_unsigned::type>::value, + "ConversionCode should be unsigned"); + assert((std::size_t)code < kErrorStrings.size()); + const ErrorString& err = kErrorStrings[(std::size_t)code]; + if (code == ConversionCode::EMPTY_INPUT_STRING && input.empty()) { + return {err.string, code}; + } + std::string tmp(err.string); + tmp.append(": "); + if (err.quote) { + tmp.append(1, '"'); + } + if (!input.empty()) { + tmp.append(input.data(), input.size()); + } + if (err.quote) { + tmp.append(1, '"'); + } + return {tmp, code}; +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/Conv.h b/native/iosTest/Pods/Folly/folly/Conv.h new file mode 100644 index 000000000..b6c0b87e1 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Conv.h 
@@ -0,0 +1,1658 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * + * This file provides a generic interface for converting objects to and from + * string-like types (std::string, fbstring, StringPiece), as well as + * range-checked conversions between numeric and enum types. The mechanisms are + * extensible, so that user-specified types can add folly::to support. + * + ******************************************************************************* + * TYPE -> STRING CONVERSIONS + ******************************************************************************* + * You can call the to or to. These are variadic + * functions that convert their arguments to strings, and concatenate them to + * form a result. So, for example, + * + * auto str = to(123, "456", 789); + * + * Sets str to "123456789". + * + * In addition to just concatenating the arguments, related functions can + * delimit them with some string: toDelim(",", "123", 456, "789") + * will return the string "123,456,789". + * + * toAppend does not return a string; instead, it takes a pointer to a string as + * its last argument, and appends the result of the concatenation into it: + * std::string str = "123"; + * toAppend(456, "789", &str); // Now str is "123456789". 
+ * + * The toAppendFit function acts like toAppend, but it precalculates the size + * required to perform the append operation, and reserves that space in the + * output string before actually inserting its arguments. This can sometimes + * save on string expansion, but beware: appending to the same string many times + * with toAppendFit is likely a pessimization, since it will resize the string + * once per append. + * + * The combination of the append and delim variants also exist: toAppendDelim + * and toAppendDelimFit are defined, with the obvious semantics. + * + ******************************************************************************* + * STRING -> TYPE CONVERSIONS + ******************************************************************************* + * Going in the other direction, and parsing a string into a C++ type, is also + * supported: + * to("123"); // Returns 123. + * + * Out of range (e.g. to("1000")), or invalidly formatted (e.g. + * to("four")) inputs will throw. If throw-on-error is undesirable (for + * instance: you're dealing with untrusted input, and want to protect yourself + * from users sending you down a very slow exception-throwing path), you can use + * tryTo, which will return an Expected. + * + * There are overloads of to() and tryTo() that take a StringPiece*. These parse + * out a type from the beginning of a string, and modify the passed-in + * StringPiece to indicate the portion of the string not consumed. + * + ******************************************************************************* + * NUMERIC / ENUM CONVERSIONS + ******************************************************************************* + * Conv also supports a to(S) overload, where T and S are numeric or enum + * types, that checks to see that the target type can represent its argument, + * and will throw if it cannot. 
This includes cases where a floating point -> + * integral conversion is attempted on a value with a non-zero fractional + * component, and integral -> floating point conversions that would lose + * precision. Enum conversions are range-checked for the underlying type of the + * enum, but there is no check that the input value is a valid choice of enum + * value. + * + ******************************************************************************* + * CUSTOM TYPE CONVERSIONS + ******************************************************************************* + * Users may customize the string conversion functionality for their own data + * types, . The key functions you should implement are: + * // Two functions to allow conversion to your type from a string. + * Expected parseTo(folly::StringPiece in, + * YourType& out); + * YourErrorType makeConversionError(YourErrorType in, StringPiece in); + * // Two functions to allow conversion from your type to a string. + * template + * void toAppend(const YourType& in, String* out); + * size_t estimateSpaceNeeded(const YourType& in); + * + * These are documented below, inline. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include // V8 JavaScript implementation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace folly { + +// Keep this in sync with kErrorStrings in Conv.cpp +enum class ConversionCode : unsigned char { + SUCCESS, + EMPTY_INPUT_STRING, + NO_DIGITS, + BOOL_OVERFLOW, + BOOL_INVALID_VALUE, + NON_DIGIT_CHAR, + INVALID_LEADING_CHAR, + POSITIVE_OVERFLOW, + NEGATIVE_OVERFLOW, + STRING_TO_FLOAT_ERROR, + NON_WHITESPACE_AFTER_END, + ARITH_POSITIVE_OVERFLOW, + ARITH_NEGATIVE_OVERFLOW, + ARITH_LOSS_OF_PRECISION, + NUM_ERROR_CODES, // has to be the last entry +}; + +struct ConversionErrorBase : std::range_error { + using std::range_error::range_error; +}; + +class ConversionError : public ConversionErrorBase { + public: + ConversionError(const std::string& str, ConversionCode code) + : ConversionErrorBase(str), code_(code) {} + + ConversionError(const char* str, ConversionCode code) + : ConversionErrorBase(str), code_(code) {} + + ConversionCode errorCode() const { + return code_; + } + + private: + ConversionCode code_; +}; + +/******************************************************************************* + * Custom Error Translation + * + * Your overloaded parseTo() function can return a custom error code on failure. + * ::folly::to() will call makeConversionError to translate that error code into + * an object to throw. makeConversionError is found by argument-dependent + * lookup. It should have this signature: + * + * namespace other_namespace { + * enum YourErrorCode { BAD_ERROR, WORSE_ERROR }; + * + * struct YourConversionError : ConversionErrorBase { + * YourConversionError(const char* what) : ConversionErrorBase(what) {} + * }; + * + * YourConversionError + * makeConversionError(YourErrorCode code, ::folly::StringPiece sp) { + * ... 
+ * return YourConversionError(messageString); + * } + ******************************************************************************/ +ConversionError makeConversionError(ConversionCode code, StringPiece input); + +namespace detail { +/** + * Enforce that the suffix following a number is made up only of whitespace. + */ +inline ConversionCode enforceWhitespaceErr(StringPiece sp) { + for (auto c : sp) { + if (UNLIKELY(!std::isspace(c))) { + return ConversionCode::NON_WHITESPACE_AFTER_END; + } + } + return ConversionCode::SUCCESS; +} + +/** + * Keep this implementation around for prettyToDouble(). + */ +inline void enforceWhitespace(StringPiece sp) { + auto err = enforceWhitespaceErr(sp); + if (err != ConversionCode::SUCCESS) { + throw_exception(makeConversionError(err, sp)); + } +} +} // namespace detail + +/** + * The identity conversion function. + * tryTo(T) returns itself for all types T. + */ +template +typename std::enable_if< + std::is_same::type>::value, + Expected>::type +tryTo(Src&& value) { + return std::forward(value); +} + +template +typename std::enable_if< + std::is_same::type>::value, + Tgt>::type +to(Src&& value) { + return std::forward(value); +} + +/******************************************************************************* + * Arithmetic to boolean + ******************************************************************************/ + +/** + * Unchecked conversion from arithmetic to boolean. This is different from the + * other arithmetic conversions because we use the C convention of treating any + * non-zero value as true, instead of range checking. 
+ */ +template +typename std::enable_if< + std::is_arithmetic::value && !std::is_same::value && + std::is_same::value, + Expected>::type +tryTo(const Src& value) { + return value != Src(); +} + +template +typename std::enable_if< + std::is_arithmetic::value && !std::is_same::value && + std::is_same::value, + Tgt>::type +to(const Src& value) { + return value != Src(); +} + +/******************************************************************************* + * Anything to string + ******************************************************************************/ + +namespace detail { + +#ifdef _MSC_VER +// MSVC can't quite figure out the LastElementImpl::call() stuff +// in the base implementation, so we have to use tuples instead, +// which result in significantly more templates being compiled, +// though the runtime performance is the same. + +template +auto getLastElement(Ts&&... ts) -> decltype(std::get( + std::forward_as_tuple(std::forward(ts)...))) { + return std::get( + std::forward_as_tuple(std::forward(ts)...)); +} + +inline void getLastElement() {} + +template +struct LastElementType : std::tuple_element> {}; + +template <> +struct LastElementType<0> { + using type = void; +}; + +template +struct LastElement + : std::decay::type> {}; +#else +template +struct LastElementImpl { + static void call(Ignored...) {} +}; + +template +struct LastElementImpl { + template + static Last call(Ignored..., Last&& last) { + return std::forward(last); + } +}; + +template +auto getLastElement(const Ts&... ts) + -> decltype(LastElementImpl::call(ts...)) { + return LastElementImpl::call(ts...); +} + +template +struct LastElement : std::decay::call(std::declval()...))> { +}; +#endif + +} // namespace detail + +/******************************************************************************* + * Conversions from integral types to string types. 
+ ******************************************************************************/ + +#if FOLLY_HAVE_INT128_T +namespace detail { + +template +constexpr unsigned int digitsEnough() { + return (unsigned int)(ceil(sizeof(IntegerType) * CHAR_BIT * M_LN2 / M_LN10)); +} + +inline size_t +unsafeTelescope128(char* buffer, size_t room, unsigned __int128 x) { + typedef unsigned __int128 Usrc; + size_t p = room - 1; + + while (x >= (Usrc(1) << 64)) { // Using 128-bit division while needed + const auto y = x / 10; + const auto digit = x % 10; + + buffer[p--] = static_cast('0' + digit); + x = y; + } + + uint64_t xx = static_cast(x); // Rest uses faster 64-bit division + + while (xx >= 10) { + const auto y = xx / 10ULL; + const auto digit = xx % 10ULL; + + buffer[p--] = static_cast('0' + digit); + xx = y; + } + + buffer[p] = static_cast('0' + xx); + + return p; +} + +} // namespace detail +#endif + +/** + * Returns the number of digits in the base 10 representation of an + * uint64_t. Useful for preallocating buffers and such. It's also used + * internally, see below. Measurements suggest that defining a + * separate overload for 32-bit integers is not worthwhile. + */ + +inline uint32_t digits10(uint64_t v) { +#ifdef __x86_64__ + + // For this arch we can get a little help from specialized CPU instructions + // which can count leading zeroes; 64 minus that is appx. log (base 2). + // Use that to approximate base-10 digits (log_10) and then adjust if needed. + + // 10^i, defined for i 0 through 19. + // This is 20 * 8 == 160 bytes, which fits neatly into 5 cache lines + // (assuming a cache line size of 64). 
+ alignas(64) static const uint64_t powersOf10[20] = { + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + 10000000000, + 100000000000, + 1000000000000, + 10000000000000, + 100000000000000, + 1000000000000000, + 10000000000000000, + 100000000000000000, + 1000000000000000000, + 10000000000000000000UL, + }; + + // "count leading zeroes" operation not valid; for 0; special case this. + if (UNLIKELY(!v)) { + return 1; + } + + // bits is in the ballpark of log_2(v). + const uint32_t leadingZeroes = __builtin_clzll(v); + const auto bits = 63 - leadingZeroes; + + // approximate log_10(v) == log_10(2) * bits. + // Integer magic below: 77/256 is appx. 0.3010 (log_10(2)). + // The +1 is to make this the ceiling of the log_10 estimate. + const uint32_t minLength = 1 + ((bits * 77) >> 8); + + // return that log_10 lower bound, plus adjust if input >= 10^(that bound) + // in case there's a small error and we misjudged length. + return minLength + uint32_t(v >= powersOf10[minLength]); + +#else + + uint32_t result = 1; + while (true) { + if (LIKELY(v < 10)) { + return result; + } + if (LIKELY(v < 100)) { + return result + 1; + } + if (LIKELY(v < 1000)) { + return result + 2; + } + if (LIKELY(v < 10000)) { + return result + 3; + } + // Skip ahead by 4 orders of magnitude + v /= 10000U; + result += 4; + } + +#endif +} + +/** + * Copies the ASCII base 10 representation of v into buffer and + * returns the number of bytes written. Does NOT append a \0. Assumes + * the buffer points to digits10(v) bytes of valid memory. Note that + * uint64 needs at most 20 bytes, uint32_t needs at most 10 bytes, + * uint16_t needs at most 5 bytes, and so on. Measurements suggest + * that defining a separate overload for 32-bit integers is not + * worthwhile. + * + * This primitive is unsafe because it makes the size assumption and + * because it does not add a terminating \0. 
+ */ + +inline uint32_t uint64ToBufferUnsafe(uint64_t v, char* const buffer) { + auto const result = digits10(v); + // WARNING: using size_t or pointer arithmetic for pos slows down + // the loop below 20x. This is because several 32-bit ops can be + // done in parallel, but only fewer 64-bit ones. + uint32_t pos = result - 1; + while (v >= 10) { + // Keep these together so a peephole optimization "sees" them and + // computes them in one shot. + auto const q = v / 10; + auto const r = v % 10; + buffer[pos--] = static_cast('0' + r); + v = q; + } + // Last digit is trivial to handle + buffer[pos] = static_cast(v + '0'); + return result; +} + +/** + * A single char gets appended. + */ +template +void toAppend(char value, Tgt* result) { + *result += value; +} + +template +constexpr typename std::enable_if::value, size_t>::type +estimateSpaceNeeded(T) { + return 1; +} + +template +constexpr size_t estimateSpaceNeeded(const char (&)[N]) { + return N; +} + +/** + * Everything implicitly convertible to const char* gets appended. + */ +template +typename std::enable_if< + std::is_convertible::value && + IsSomeString::value>::type +toAppend(Src value, Tgt* result) { + // Treat null pointers like an empty string, as in: + // operator<<(std::ostream&, const char*). 
+ const char* c = value; + if (c) { + result->append(value); + } +} + +template +typename std::enable_if::value, size_t>:: + type + estimateSpaceNeeded(Src value) { + const char* c = value; + if (c) { + return folly::StringPiece(value).size(); + }; + return 0; +} + +template +typename std::enable_if::value, size_t>::type +estimateSpaceNeeded(Src const& value) { + return value.size(); +} + +template +typename std::enable_if< + std::is_convertible::value && + !IsSomeString::value && + !std::is_convertible::value, + size_t>::type +estimateSpaceNeeded(Src value) { + return folly::StringPiece(value).size(); +} + +template <> +inline size_t estimateSpaceNeeded(std::nullptr_t /* value */) { + return 0; +} + +template +typename std::enable_if< + std::is_pointer::value && + IsSomeString>::value, + size_t>::type +estimateSpaceNeeded(Src value) { + return value->size(); +} + +/** + * Strings get appended, too. + */ +template +typename std::enable_if< + IsSomeString::value && IsSomeString::value>::type +toAppend(const Src& value, Tgt* result) { + result->append(value); +} + +/** + * and StringPiece objects too + */ +template +typename std::enable_if::value>::type toAppend( + StringPiece value, + Tgt* result) { + result->append(value.data(), value.size()); +} + +/** + * There's no implicit conversion from fbstring to other string types, + * so make a specialization. + */ +template +typename std::enable_if::value>::type toAppend( + const fbstring& value, + Tgt* result) { + result->append(value.data(), value.size()); +} + +#if FOLLY_HAVE_INT128_T +/** + * Special handling for 128 bit integers. 
+ */ + +template +void toAppend(__int128 value, Tgt* result) { + typedef unsigned __int128 Usrc; + char buffer[detail::digitsEnough() + 1]; + size_t p; + + if (value < 0) { + p = detail::unsafeTelescope128(buffer, sizeof(buffer), -Usrc(value)); + buffer[--p] = '-'; + } else { + p = detail::unsafeTelescope128(buffer, sizeof(buffer), value); + } + + result->append(buffer + p, buffer + sizeof(buffer)); +} + +template +void toAppend(unsigned __int128 value, Tgt* result) { + char buffer[detail::digitsEnough()]; + size_t p; + + p = detail::unsafeTelescope128(buffer, sizeof(buffer), value); + + result->append(buffer + p, buffer + sizeof(buffer)); +} + +template +constexpr + typename std::enable_if::value, size_t>::type + estimateSpaceNeeded(T) { + return detail::digitsEnough<__int128>(); +} + +template +constexpr typename std:: + enable_if::value, size_t>::type + estimateSpaceNeeded(T) { + return detail::digitsEnough(); +} + +#endif + +/** + * int32_t and int64_t to string (by appending) go through here. The + * result is APPENDED to a preexisting string passed as the second + * parameter. This should be efficient with fbstring because fbstring + * incurs no dynamic allocation below 23 bytes and no number has more + * than 22 bytes in its textual representation (20 for digits, one for + * sign, one for the terminating 0). 
+ */ +template +typename std::enable_if< + std::is_integral::value && std::is_signed::value && + IsSomeString::value && sizeof(Src) >= 4>::type +toAppend(Src value, Tgt* result) { + char buffer[20]; + if (value < 0) { + result->push_back('-'); + result->append( + buffer, + uint64ToBufferUnsafe(~static_cast(value) + 1, buffer)); + } else { + result->append(buffer, uint64ToBufferUnsafe(uint64_t(value), buffer)); + } +} + +template +typename std::enable_if< + std::is_integral::value && std::is_signed::value && + sizeof(Src) >= 4 && sizeof(Src) < 16, + size_t>::type +estimateSpaceNeeded(Src value) { + if (value < 0) { + // When "value" is the smallest negative, negating it would evoke + // undefined behavior, so, instead of writing "-value" below, we write + // "~static_cast(value) + 1" + return 1 + digits10(~static_cast(value) + 1); + } + + return digits10(static_cast(value)); +} + +/** + * As above, but for uint32_t and uint64_t. + */ +template +typename std::enable_if< + std::is_integral::value && !std::is_signed::value && + IsSomeString::value && sizeof(Src) >= 4>::type +toAppend(Src value, Tgt* result) { + char buffer[20]; + result->append(buffer, uint64ToBufferUnsafe(value, buffer)); +} + +template +typename std::enable_if< + std::is_integral::value && !std::is_signed::value && + sizeof(Src) >= 4 && sizeof(Src) < 16, + size_t>::type +estimateSpaceNeeded(Src value) { + return digits10(value); +} + +/** + * All small signed and unsigned integers to string go through 32-bit + * types int32_t and uint32_t, respectively. 
+ */ +template +typename std::enable_if< + std::is_integral::value && IsSomeString::value && + sizeof(Src) < 4>::type +toAppend(Src value, Tgt* result) { + typedef + typename std::conditional::value, int64_t, uint64_t>:: + type Intermediate; + toAppend(static_cast(value), result); +} + +template +typename std::enable_if< + std::is_integral::value && sizeof(Src) < 4 && + !std::is_same::value, + size_t>::type +estimateSpaceNeeded(Src value) { + typedef + typename std::conditional::value, int64_t, uint64_t>:: + type Intermediate; + return estimateSpaceNeeded(static_cast(value)); +} + +/** + * Enumerated values get appended as integers. + */ +template +typename std::enable_if< + std::is_enum::value && IsSomeString::value>::type +toAppend(Src value, Tgt* result) { + toAppend(to_underlying(value), result); +} + +template +typename std::enable_if::value, size_t>::type +estimateSpaceNeeded(Src value) { + return estimateSpaceNeeded(to_underlying(value)); +} + +/******************************************************************************* + * Conversions from floating-point types to string types. 
+ ******************************************************************************/ + +namespace detail { +constexpr int kConvMaxDecimalInShortestLow = -6; +constexpr int kConvMaxDecimalInShortestHigh = 21; +} // namespace detail + +/** Wrapper around DoubleToStringConverter **/ +template +typename std::enable_if< + std::is_floating_point::value && IsSomeString::value>::type +toAppend( + Src value, + Tgt* result, + double_conversion::DoubleToStringConverter::DtoaMode mode, + unsigned int numDigits) { + using namespace double_conversion; + DoubleToStringConverter conv( + DoubleToStringConverter::NO_FLAGS, + "Infinity", + "NaN", + 'E', + detail::kConvMaxDecimalInShortestLow, + detail::kConvMaxDecimalInShortestHigh, + 6, // max leading padding zeros + 1); // max trailing padding zeros + char buffer[256]; + StringBuilder builder(buffer, sizeof(buffer)); + switch (mode) { + case DoubleToStringConverter::SHORTEST: + conv.ToShortest(value, &builder); + break; + case DoubleToStringConverter::SHORTEST_SINGLE: + conv.ToShortestSingle(static_cast(value), &builder); + break; + case DoubleToStringConverter::FIXED: + conv.ToFixed(value, int(numDigits), &builder); + break; + case DoubleToStringConverter::PRECISION: + default: + assert(mode == DoubleToStringConverter::PRECISION); + conv.ToPrecision(value, int(numDigits), &builder); + break; + } + const size_t length = size_t(builder.position()); + builder.Finalize(); + result->append(buffer, length); +} + +/** + * As above, but for floating point + */ +template +typename std::enable_if< + std::is_floating_point::value && IsSomeString::value>::type +toAppend(Src value, Tgt* result) { + toAppend( + value, result, double_conversion::DoubleToStringConverter::SHORTEST, 0); +} + +/** + * Upper bound of the length of the output from + * DoubleToStringConverter::ToShortest(double, StringBuilder*), + * as used in toAppend(double, string*). 
+ */ +template +typename std::enable_if::value, size_t>::type +estimateSpaceNeeded(Src value) { + // kBase10MaximalLength is 17. We add 1 for decimal point, + // e.g. 10.0/9 is 17 digits and 18 characters, including the decimal point. + constexpr int kMaxMantissaSpace = + double_conversion::DoubleToStringConverter::kBase10MaximalLength + 1; + // strlen("E-") + digits10(numeric_limits::max_exponent10) + constexpr int kMaxExponentSpace = 2 + 3; + static const int kMaxPositiveSpace = std::max({ + // E.g. 1.1111111111111111E-100. + kMaxMantissaSpace + kMaxExponentSpace, + // E.g. 0.000001.1111111111111111, if kConvMaxDecimalInShortestLow is -6. + kMaxMantissaSpace - detail::kConvMaxDecimalInShortestLow, + // If kConvMaxDecimalInShortestHigh is 21, then 1e21 is the smallest + // number > 1 which ToShortest outputs in exponential notation, + // so 21 is the longest non-exponential number > 1. + detail::kConvMaxDecimalInShortestHigh, + }); + return size_t( + kMaxPositiveSpace + + (value < 0 ? 1 : 0)); // +1 for minus sign, if negative +} + +/** + * This can be specialized, together with adding specialization + * for estimateSpaceNeed for your type, so that we allocate + * as much as you need instead of the default + */ +template +struct HasLengthEstimator : std::false_type {}; + +template +constexpr typename std::enable_if< + !std::is_fundamental::value && +#if FOLLY_HAVE_INT128_T + // On OSX 10.10, is_fundamental<__int128> is false :-O + !std::is_same<__int128, Src>::value && + !std::is_same::value && +#endif + !IsSomeString::value && + !std::is_convertible::value && + !std::is_convertible::value && + !std::is_enum::value && !HasLengthEstimator::value, + size_t>::type +estimateSpaceNeeded(const Src&) { + return sizeof(Src) + 1; // dumbest best effort ever? 
+} + +namespace detail { + +template +typename std::enable_if::value, size_t>::type +estimateSpaceToReserve(size_t sofar, Tgt*) { + return sofar; +} + +template +size_t estimateSpaceToReserve(size_t sofar, const T& v, const Ts&... vs) { + return estimateSpaceToReserve(sofar + estimateSpaceNeeded(v), vs...); +} + +template +void reserveInTarget(const Ts&... vs) { + getLastElement(vs...)->reserve(estimateSpaceToReserve(0, vs...)); +} + +template +void reserveInTargetDelim(const Delimiter& d, const Ts&... vs) { + static_assert(sizeof...(vs) >= 2, "Needs at least 2 args"); + size_t fordelim = (sizeof...(vs) - 2) * + estimateSpaceToReserve(0, d, static_cast(nullptr)); + getLastElement(vs...)->reserve(estimateSpaceToReserve(fordelim, vs...)); +} + +/** + * Variadic base case: append one element + */ +template +typename std::enable_if< + IsSomeString::type>::value>::type +toAppendStrImpl(const T& v, Tgt result) { + toAppend(v, result); +} + +template +typename std::enable_if< + sizeof...(Ts) >= 2 && + IsSomeString::type>::type>::value>::type +toAppendStrImpl(const T& v, const Ts&... vs) { + toAppend(v, getLastElement(vs...)); + toAppendStrImpl(vs...); +} + +template +typename std::enable_if< + IsSomeString::type>::value>::type +toAppendDelimStrImpl(const Delimiter& /* delim */, const T& v, Tgt result) { + toAppend(v, result); +} + +template +typename std::enable_if< + sizeof...(Ts) >= 2 && + IsSomeString::type>::type>::value>::type +toAppendDelimStrImpl(const Delimiter& delim, const T& v, const Ts&... vs) { + // we are really careful here, calling toAppend with just one element does + // not try to estimate space needed (as we already did that). If we call + // toAppend(v, delim, ....) we would do unnecesary size calculation + toAppend(v, detail::getLastElement(vs...)); + toAppend(delim, detail::getLastElement(vs...)); + toAppendDelimStrImpl(delim, vs...); +} +} // namespace detail + +/** + * Variadic conversion to string. Appends each element in turn. 
+ * If we have two or more things to append, we will not reserve + * the space for them and will depend on strings exponential growth. + * If you just append once consider using toAppendFit which reserves + * the space needed (but does not have exponential as a result). + * + * Custom implementations of toAppend() can be provided in the same namespace as + * the type to customize printing. estimateSpaceNeed() may also be provided to + * avoid reallocations in toAppendFit(): + * + * namespace other_namespace { + * + * template + * void toAppend(const OtherType&, String* out); + * + * // optional + * size_t estimateSpaceNeeded(const OtherType&); + * + * } + */ +template +typename std::enable_if< + sizeof...(Ts) >= 3 && + IsSomeString::type>::type>::value>::type +toAppend(const Ts&... vs) { + ::folly::detail::toAppendStrImpl(vs...); +} + +#ifdef _MSC_VER +// Special case pid_t on MSVC, because it's a void* rather than an +// integral type. We can't do a global special case because this is already +// dangerous enough (as most pointers will implicitly convert to a void*) +// just doing it for MSVC. +template +void toAppend(const pid_t a, Tgt* res) { + toAppend(uint64_t(a), res); +} +#endif + +/** + * Special version of the call that preallocates exaclty as much memory + * as need for arguments to be stored in target. This means we are + * not doing exponential growth when we append. If you are using it + * in a loop you are aiming at your foot with a big perf-destroying + * bazooka. + * On the other hand if you are appending to a string once, this + * will probably save a few calls to malloc. + */ +template +typename std::enable_if::type>::type>::value>::type +toAppendFit(const Ts&... vs) { + ::folly::detail::reserveInTarget(vs...); + toAppend(vs...); +} + +template +void toAppendFit(const Ts&) {} + +/** + * Variadic base case: do nothing. + */ +template +typename std::enable_if::value>::type toAppend( + Tgt* /* result */) {} + +/** + * Variadic base case: do nothing. 
+ */ +template +typename std::enable_if::value>::type toAppendDelim( + const Delimiter& /* delim */, + Tgt* /* result */) {} + +/** + * 1 element: same as toAppend. + */ +template +typename std::enable_if::value>::type +toAppendDelim(const Delimiter& /* delim */, const T& v, Tgt* tgt) { + toAppend(v, tgt); +} + +/** + * Append to string with a delimiter in between elements. Check out + * comments for toAppend for details about memory allocation. + */ +template +typename std::enable_if< + sizeof...(Ts) >= 3 && + IsSomeString::type>::type>::value>::type +toAppendDelim(const Delimiter& delim, const Ts&... vs) { + detail::toAppendDelimStrImpl(delim, vs...); +} + +/** + * Detail in comment for toAppendFit + */ +template +typename std::enable_if::type>::type>::value>::type +toAppendDelimFit(const Delimiter& delim, const Ts&... vs) { + detail::reserveInTargetDelim(delim, vs...); + toAppendDelim(delim, vs...); +} + +template +void toAppendDelimFit(const De&, const Ts&) {} + +/** + * to(v1, v2, ...) uses toAppend() (see below) as back-end + * for all types. + */ +template +typename std::enable_if< + IsSomeString::value && + (sizeof...(Ts) != 1 || + !std::is_same::type>:: + value), + Tgt>::type +to(const Ts&... vs) { + Tgt result; + toAppendFit(vs..., &result); + return result; +} + +/** + * Special version of to for floating point. When calling + * folly::to(double), generic implementation above will + * firstly reserve 24 (or 25 when negative value) bytes. This will + * introduce a malloc call for most mainstream string implementations. + * + * But for most cases, a floating point doesn't need 24 (or 25) bytes to + * be converted as a string. + * + * This special version will not do string reserve. + */ +template +typename std::enable_if< + IsSomeString::value && std::is_floating_point::value, + Tgt>::type +to(Src value) { + Tgt result; + toAppend(value, &result); + return result; +} + +/** + * toDelim(SomeString str) returns itself. 
+ */ +template +typename std::enable_if< + IsSomeString::value && + std::is_same::type>::value, + Tgt>::type +toDelim(const Delim& /* delim */, Src&& value) { + return std::forward(value); +} + +/** + * toDelim(delim, v1, v2, ...) uses toAppendDelim() as + * back-end for all types. + */ +template +typename std::enable_if< + IsSomeString::value && + (sizeof...(Ts) != 1 || + !std::is_same::type>:: + value), + Tgt>::type +toDelim(const Delim& delim, const Ts&... vs) { + Tgt result; + toAppendDelimFit(delim, vs..., &result); + return result; +} + +/******************************************************************************* + * Conversions from string types to integral types. + ******************************************************************************/ + +namespace detail { + +Expected str_to_bool(StringPiece* src) noexcept; + +template +Expected str_to_floating(StringPiece* src) noexcept; + +extern template Expected str_to_floating( + StringPiece* src) noexcept; +extern template Expected str_to_floating( + StringPiece* src) noexcept; + +template +Expected digits_to(const char* b, const char* e) noexcept; + +extern template Expected digits_to( + const char*, + const char*) noexcept; +extern template Expected digits_to( + const char*, + const char*) noexcept; +extern template Expected +digits_to(const char*, const char*) noexcept; + +extern template Expected digits_to( + const char*, + const char*) noexcept; +extern template Expected +digits_to(const char*, const char*) noexcept; + +extern template Expected digits_to( + const char*, + const char*) noexcept; +extern template Expected digits_to( + const char*, + const char*) noexcept; + +extern template Expected digits_to( + const char*, + const char*) noexcept; +extern template Expected +digits_to(const char*, const char*) noexcept; + +extern template Expected digits_to( + const char*, + const char*) noexcept; +extern template Expected +digits_to(const char*, const char*) noexcept; + +#if FOLLY_HAVE_INT128_T 
+extern template Expected<__int128, ConversionCode> digits_to<__int128>( + const char*, + const char*) noexcept; +extern template Expected +digits_to(const char*, const char*) noexcept; +#endif + +template +Expected str_to_integral(StringPiece* src) noexcept; + +extern template Expected str_to_integral( + StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; + +extern template Expected str_to_integral( + StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; + +extern template Expected str_to_integral( + StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; + +extern template Expected str_to_integral( + StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; + +extern template Expected str_to_integral( + StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; + +#if FOLLY_HAVE_INT128_T +extern template Expected<__int128, ConversionCode> str_to_integral<__int128>( + StringPiece* src) noexcept; +extern template Expected +str_to_integral(StringPiece* src) noexcept; +#endif + +template +typename std:: + enable_if::value, Expected>::type + convertTo(StringPiece* src) noexcept { + return str_to_bool(src); +} + +template +typename std::enable_if< + std::is_floating_point::value, + Expected>::type +convertTo(StringPiece* src) noexcept { + return str_to_floating(src); +} + +template +typename std::enable_if< + std::is_integral::value && !std::is_same::value, + Expected>::type +convertTo(StringPiece* src) noexcept { + return str_to_integral(src); +} + +} // namespace detail + +/** + * String represented as a pair of pointers to char to unsigned + * integrals. Assumes NO whitespace before or after. 
+ */ +template +typename std::enable_if< + std::is_integral::value && !std::is_same::value, + Expected>::type +tryTo(const char* b, const char* e) { + return detail::digits_to(b, e); +} + +template +typename std::enable_if< + std::is_integral::value && !std::is_same::value, + Tgt>::type +to(const char* b, const char* e) { + return tryTo(b, e).thenOrThrow( + [](Tgt res) { return res; }, + [=](ConversionCode code) { + return makeConversionError(code, StringPiece(b, e)); + }); +} + +/******************************************************************************* + * Conversions from string types to arithmetic types. + ******************************************************************************/ + +/** + * Parsing strings to numeric types. + */ +template +FOLLY_NODISCARD inline typename std::enable_if< + std::is_arithmetic::value, + Expected>::type +parseTo(StringPiece src, Tgt& out) { + return detail::convertTo(&src).then( + [&](Tgt res) { return void(out = res), src; }); +} + +/******************************************************************************* + * Integral / Floating Point to integral / Floating Point + ******************************************************************************/ + +namespace detail { + +/** + * Bool to integral/float doesn't need any special checks, and this + * overload means we aren't trying to see if a bool is less than + * an integer. + */ +template +typename std::enable_if< + !std::is_same::value && + (std::is_integral::value || std::is_floating_point::value), + Expected>::type +convertTo(const bool& value) noexcept { + return static_cast(value ? 1 : 0); +} + +/** + * Checked conversion from integral to integral. The checks are only + * performed when meaningful, e.g. conversion from int to long goes + * unchecked. 
+ */ +template +typename std::enable_if< + std::is_integral::value && !std::is_same::value && + !std::is_same::value && std::is_integral::value, + Expected>::type +convertTo(const Src& value) noexcept { + if /* constexpr */ ( + std::make_unsigned_t(std::numeric_limits::max()) < + std::make_unsigned_t(std::numeric_limits::max())) { + if (greater_than::max()>(value)) { + return makeUnexpected(ConversionCode::ARITH_POSITIVE_OVERFLOW); + } + } + if /* constexpr */ ( + std::is_signed::value && + (!std::is_signed::value || sizeof(Src) > sizeof(Tgt))) { + if (less_than::min()>(value)) { + return makeUnexpected(ConversionCode::ARITH_NEGATIVE_OVERFLOW); + } + } + return static_cast(value); +} + +/** + * Checked conversion from floating to floating. The checks are only + * performed when meaningful, e.g. conversion from float to double goes + * unchecked. + */ +template +typename std::enable_if< + std::is_floating_point::value && std::is_floating_point::value && + !std::is_same::value, + Expected>::type +convertTo(const Src& value) noexcept { + if /* constexpr */ ( + std::numeric_limits::max() < std::numeric_limits::max()) { + if (value > std::numeric_limits::max()) { + return makeUnexpected(ConversionCode::ARITH_POSITIVE_OVERFLOW); + } + if (value < std::numeric_limits::lowest()) { + return makeUnexpected(ConversionCode::ARITH_NEGATIVE_OVERFLOW); + } + } + return static_cast(value); +} + +/** + * Check if a floating point value can safely be converted to an + * integer value without triggering undefined behaviour. 
+ */ +template +inline typename std::enable_if< + std::is_floating_point::value && std::is_integral::value && + !std::is_same::value, + bool>::type +checkConversion(const Src& value) { + constexpr Src tgtMaxAsSrc = static_cast(std::numeric_limits::max()); + constexpr Src tgtMinAsSrc = static_cast(std::numeric_limits::min()); + if (value >= tgtMaxAsSrc) { + if (value > tgtMaxAsSrc) { + return false; + } + const Src mmax = folly::nextafter(tgtMaxAsSrc, Src()); + if (static_cast(value - mmax) > + std::numeric_limits::max() - static_cast(mmax)) { + return false; + } + } else if (std::is_signed::value && value <= tgtMinAsSrc) { + if (value < tgtMinAsSrc) { + return false; + } + const Src mmin = folly::nextafter(tgtMinAsSrc, Src()); + if (static_cast(value - mmin) < + std::numeric_limits::min() - static_cast(mmin)) { + return false; + } + } + return true; +} + +// Integers can always safely be converted to floating point values +template +constexpr typename std::enable_if< + std::is_integral::value && std::is_floating_point::value, + bool>::type +checkConversion(const Src&) { + return true; +} + +// Also, floating point values can always be safely converted to bool +// Per the standard, any floating point value that is not zero will yield true +template +constexpr typename std::enable_if< + std::is_floating_point::value && std::is_same::value, + bool>::type +checkConversion(const Src&) { + return true; +} + +/** + * Checked conversion from integral to floating point and back. The + * result must be convertible back to the source type without loss of + * precision. This seems Draconian but sometimes is what's needed, and + * complements existing routines nicely. For various rounding + * routines, see . 
+ */ +template +typename std::enable_if< + (std::is_integral::value && std::is_floating_point::value) || + (std::is_floating_point::value && std::is_integral::value), + Expected>::type +convertTo(const Src& value) noexcept { + if (LIKELY(checkConversion(value))) { + Tgt result = static_cast(value); + if (LIKELY(checkConversion(result))) { + Src witness = static_cast(result); + if (LIKELY(value == witness)) { + return result; + } + } + } + return makeUnexpected(ConversionCode::ARITH_LOSS_OF_PRECISION); +} + +template +inline std::string errorValue(const Src& value) { + return to("(", pretty_name(), ") ", value); +} + +template +using IsArithToArith = bool_constant< + !std::is_same::value && !std::is_same::value && + std::is_arithmetic::value && std::is_arithmetic::value>; + +} // namespace detail + +template +typename std::enable_if< + detail::IsArithToArith::value, + Expected>::type +tryTo(const Src& value) noexcept { + return detail::convertTo(value); +} + +template +typename std::enable_if::value, Tgt>::type to( + const Src& value) { + return tryTo(value).thenOrThrow( + [](Tgt res) { return res; }, + [&](ConversionCode e) { + return makeConversionError(e, detail::errorValue(value)); + }); +} + +/******************************************************************************* + * Custom Conversions + * + * Any type can be used with folly::to by implementing parseTo. 
The + * implementation should be provided in the namespace of the type to facilitate + * argument-dependent lookup: + * + * namespace other_namespace { + * ::folly::Expected<::folly::StringPiece, SomeErrorCode> + * parseTo(::folly::StringPiece, OtherType&) noexcept; + * } + ******************************************************************************/ +template +FOLLY_NODISCARD typename std::enable_if< + std::is_enum::value, + Expected>::type +parseTo(StringPiece in, T& out) noexcept { + typename std::underlying_type::type tmp{}; + auto restOrError = parseTo(in, tmp); + out = static_cast(tmp); // Harmless if parseTo fails + return restOrError; +} + +FOLLY_NODISCARD +inline Expected parseTo( + StringPiece in, + StringPiece& out) noexcept { + out = in; + return StringPiece{in.end(), in.end()}; +} + +FOLLY_NODISCARD +inline Expected parseTo( + StringPiece in, + std::string& out) { + out.clear(); + out.append(in.data(), in.size()); // TODO try/catch? + return StringPiece{in.end(), in.end()}; +} + +FOLLY_NODISCARD +inline Expected parseTo( + StringPiece in, + fbstring& out) { + out.clear(); + out.append(in.data(), in.size()); // TODO try/catch? + return StringPiece{in.end(), in.end()}; +} + +namespace detail { +template +using ParseToResult = decltype(parseTo(StringPiece{}, std::declval())); + +struct CheckTrailingSpace { + Expected operator()(StringPiece sp) const { + auto e = enforceWhitespaceErr(sp); + if (UNLIKELY(e != ConversionCode::SUCCESS)) { + return makeUnexpected(e); + } + return unit; + } +}; + +template +struct ReturnUnit { + template + constexpr Expected operator()(T&&) const { + return unit; + } +}; + +// Older versions of the parseTo customization point threw on error and +// returned void. Handle that. 
+template +inline typename std::enable_if< + std::is_void>::value, + Expected>::type +parseToWrap(StringPiece sp, Tgt& out) { + parseTo(sp, out); + return StringPiece(sp.end(), sp.end()); +} + +template +inline typename std::enable_if< + !std::is_void>::value, + ParseToResult>::type +parseToWrap(StringPiece sp, Tgt& out) { + return parseTo(sp, out); +} + +template +using ParseToError = ExpectedErrorType()))>; + +} // namespace detail + +/** + * String or StringPiece to target conversion. Accepts leading and trailing + * whitespace, but no non-space trailing characters. + */ + +template +inline typename std::enable_if< + !std::is_same::value, + Expected>>::type +tryTo(StringPiece src) { + Tgt result{}; + using Error = detail::ParseToError; + using Check = typename std::conditional< + std::is_arithmetic::value, + detail::CheckTrailingSpace, + detail::ReturnUnit>::type; + return parseTo(src, result).then(Check(), [&](Unit) { + return std::move(result); + }); +} + +template +inline typename std::enable_if< + IsSomeString::value && !std::is_same::value, + Tgt>::type +to(Src const& src) { + return to(StringPiece(src.data(), src.size())); +} + +template +inline + typename std::enable_if::value, Tgt>::type + to(StringPiece src) { + Tgt result{}; + using Error = detail::ParseToError; + using Check = typename std::conditional< + std::is_arithmetic::value, + detail::CheckTrailingSpace, + detail::ReturnUnit>::type; + auto tmp = detail::parseToWrap(src, result); + return tmp + .thenOrThrow( + Check(), + [&](Error e) { throw_exception(makeConversionError(e, src)); }) + .thenOrThrow( + [&](Unit) { return std::move(result); }, + [&](Error e) { + throw_exception(makeConversionError(e, tmp.value())); + }); +} + +/** + * tryTo/to that take the strings by pointer so the caller gets information + * about how much of the string was consumed by the conversion. These do not + * check for trailing whitepsace. 
+ */ +template +Expected> tryTo(StringPiece* src) { + Tgt result; + return parseTo(*src, result).then([&, src](StringPiece sp) -> Tgt { + *src = sp; + return std::move(result); + }); +} + +template +Tgt to(StringPiece* src) { + Tgt result{}; + using Error = detail::ParseToError; + return parseTo(*src, result) + .thenOrThrow( + [&, src](StringPiece sp) -> Tgt { + *src = sp; + return std::move(result); + }, + [=](Error e) { return makeConversionError(e, *src); }); +} + +/******************************************************************************* + * Enum to anything and back + ******************************************************************************/ + +template +typename std::enable_if< + std::is_enum::value && !std::is_same::value && + !std::is_convertible::value, + Expected>::type +tryTo(const Src& value) { + return tryTo(to_underlying(value)); +} + +template +typename std::enable_if< + !std::is_convertible::value && std::is_enum::value && + !std::is_same::value, + Expected>::type +tryTo(const Src& value) { + using I = typename std::underlying_type::type; + return tryTo(value).then([](I i) { return static_cast(i); }); +} + +template +typename std::enable_if< + std::is_enum::value && !std::is_same::value && + !std::is_convertible::value, + Tgt>::type +to(const Src& value) { + return to(to_underlying(value)); +} + +template +typename std::enable_if< + !std::is_convertible::value && std::is_enum::value && + !std::is_same::value, + Tgt>::type +to(const Src& value) { + return static_cast(to::type>(value)); +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/CppAttributes.h b/native/iosTest/Pods/Folly/folly/CppAttributes.h new file mode 100644 index 000000000..cd02fa3fd --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/CppAttributes.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * GCC compatible wrappers around clang attributes. + * + * @author Dominik Gabi + */ + +#pragma once + +#ifndef __has_attribute +#define FOLLY_HAS_ATTRIBUTE(x) 0 +#else +#define FOLLY_HAS_ATTRIBUTE(x) __has_attribute(x) +#endif + +#ifndef __has_cpp_attribute +#define FOLLY_HAS_CPP_ATTRIBUTE(x) 0 +#else +#define FOLLY_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#endif + +#ifndef __has_extension +#define FOLLY_HAS_EXTENSION(x) 0 +#else +#define FOLLY_HAS_EXTENSION(x) __has_extension(x) +#endif + +/** + * Fallthrough to indicate that `break` was left out on purpose in a switch + * statement, e.g. + * + * switch (n) { + * case 22: + * case 33: // no warning: no statements between case labels + * f(); + * case 44: // warning: unannotated fall-through + * g(); + * FOLLY_FALLTHROUGH; // no warning: annotated fall-through + * } + */ +#if FOLLY_HAS_CPP_ATTRIBUTE(fallthrough) +#define FOLLY_FALLTHROUGH [[fallthrough]] +#elif FOLLY_HAS_CPP_ATTRIBUTE(clang::fallthrough) +#define FOLLY_FALLTHROUGH [[clang::fallthrough]] +#elif FOLLY_HAS_CPP_ATTRIBUTE(gnu::fallthrough) +#define FOLLY_FALLTHROUGH [[gnu::fallthrough]] +#else +#define FOLLY_FALLTHROUGH +#endif + +/** + * Maybe_unused indicates that a function, variable or parameter might or + * might not be used, e.g. 
+ * + * int foo(FOLLY_MAYBE_UNUSED int x) { + * #ifdef USE_X + * return x; + * #else + * return 0; + * #endif + * } + */ +#if FOLLY_HAS_CPP_ATTRIBUTE(maybe_unused) +#define FOLLY_MAYBE_UNUSED [[maybe_unused]] +#elif FOLLY_HAS_ATTRIBUTE(__unused__) || __GNUC__ +#define FOLLY_MAYBE_UNUSED __attribute__((__unused__)) +#else +#define FOLLY_MAYBE_UNUSED +#endif + +/** + * Nullable indicates that a return value or a parameter may be a `nullptr`, + * e.g. + * + * int* FOLLY_NULLABLE foo(int* a, int* FOLLY_NULLABLE b) { + * if (*a > 0) { // safe dereference + * return nullptr; + * } + * if (*b < 0) { // unsafe dereference + * return *a; + * } + * if (b != nullptr && *b == 1) { // safe checked dereference + * return new int(1); + * } + * return nullptr; + * } + */ +#if FOLLY_HAS_EXTENSION(nullability) +#define FOLLY_NULLABLE _Nullable +#define FOLLY_NONNULL _Nonnull +#else +#define FOLLY_NULLABLE +#define FOLLY_NONNULL +#endif + +/** + * "Cold" indicates to the compiler that a function is only expected to be + * called from unlikely code paths. It can affect decisions made by the + * optimizer both when processing the function body and when analyzing + * call-sites. + */ +#if __GNUC__ +#define FOLLY_COLD __attribute__((__cold__)) +#else +#define FOLLY_COLD +#endif diff --git a/native/iosTest/Pods/Folly/folly/CpuId.h b/native/iosTest/Pods/Folly/folly/CpuId.h new file mode 100644 index 000000000..517ccb571 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/CpuId.h @@ -0,0 +1,218 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +#ifdef _MSC_VER +#include +#endif + +namespace folly { + +/** + * Identification of an Intel CPU. + * Supports CPUID feature flags (EAX=1) and extended features (EAX=7, ECX=0). + * Values from + * http://www.intel.com/content/www/us/en/processors/processor-identification-cpuid-instruction-note.html + */ +class CpuId { + public: + // Always inline in order for this to be usable from a __ifunc__. + // In shared library mode, a __ifunc__ runs at relocation time, while the + // PLT hasn't been fully populated yet; thus, ifuncs cannot use symbols + // with potentially external linkage. 
(This issue is less likely in opt + // mode since inlining happens more likely, and it doesn't happen for + // statically linked binaries which don't depend on the PLT) + FOLLY_ALWAYS_INLINE CpuId() { +#if defined(_MSC_VER) && (FOLLY_X64 || defined(_M_IX86)) + int reg[4]; + __cpuid(static_cast(reg), 0); + const int n = reg[0]; + if (n >= 1) { + __cpuid(static_cast(reg), 1); + f1c_ = uint32_t(reg[2]); + f1d_ = uint32_t(reg[3]); + } + if (n >= 7) { + __cpuidex(static_cast(reg), 7, 0); + f7b_ = uint32_t(reg[1]); + f7c_ = uint32_t(reg[2]); + } +#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && \ + defined(__GNUC__) + // The following block like the normal cpuid branch below, but gcc + // reserves ebx for use of its pic register so we must specially + // handle the save and restore to avoid clobbering the register + uint32_t n; + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(n) + : "a"(0) + : "ecx", "edx"); + if (n >= 1) { + uint32_t f1a; + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(f1a), "=c"(f1c_), "=d"(f1d_) + : "a"(1) + :); + } + if (n >= 7) { + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "movl %%ebx, %%eax\n\r" + "popl %%ebx" + : "=a"(f7b_), "=c"(f7c_) + : "a"(7), "c"(0) + : "edx"); + } +#elif FOLLY_X64 || defined(__i386__) + uint32_t n; + __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); + if (n >= 1) { + uint32_t f1a; + __asm__("cpuid" : "=a"(f1a), "=c"(f1c_), "=d"(f1d_) : "a"(1) : "ebx"); + } + if (n >= 7) { + uint32_t f7a; + __asm__("cpuid" + : "=a"(f7a), "=b"(f7b_), "=c"(f7c_) + : "a"(7), "c"(0) + : "edx"); + } +#endif + } + +#define FOLLY_DETAIL_CPUID_X(name, r, bit) \ + FOLLY_ALWAYS_INLINE bool name() const { \ + return ((r) & (1U << bit)) != 0; \ + } + +// cpuid(1): Processor Info and Feature Bits. 
+#define FOLLY_DETAIL_CPUID_C(name, bit) FOLLY_DETAIL_CPUID_X(name, f1c_, bit) + FOLLY_DETAIL_CPUID_C(sse3, 0) + FOLLY_DETAIL_CPUID_C(pclmuldq, 1) + FOLLY_DETAIL_CPUID_C(dtes64, 2) + FOLLY_DETAIL_CPUID_C(monitor, 3) + FOLLY_DETAIL_CPUID_C(dscpl, 4) + FOLLY_DETAIL_CPUID_C(vmx, 5) + FOLLY_DETAIL_CPUID_C(smx, 6) + FOLLY_DETAIL_CPUID_C(eist, 7) + FOLLY_DETAIL_CPUID_C(tm2, 8) + FOLLY_DETAIL_CPUID_C(ssse3, 9) + FOLLY_DETAIL_CPUID_C(cnxtid, 10) + FOLLY_DETAIL_CPUID_C(fma, 12) + FOLLY_DETAIL_CPUID_C(cx16, 13) + FOLLY_DETAIL_CPUID_C(xtpr, 14) + FOLLY_DETAIL_CPUID_C(pdcm, 15) + FOLLY_DETAIL_CPUID_C(pcid, 17) + FOLLY_DETAIL_CPUID_C(dca, 18) + FOLLY_DETAIL_CPUID_C(sse41, 19) + FOLLY_DETAIL_CPUID_C(sse42, 20) + FOLLY_DETAIL_CPUID_C(x2apic, 21) + FOLLY_DETAIL_CPUID_C(movbe, 22) + FOLLY_DETAIL_CPUID_C(popcnt, 23) + FOLLY_DETAIL_CPUID_C(tscdeadline, 24) + FOLLY_DETAIL_CPUID_C(aes, 25) + FOLLY_DETAIL_CPUID_C(xsave, 26) + FOLLY_DETAIL_CPUID_C(osxsave, 27) + FOLLY_DETAIL_CPUID_C(avx, 28) + FOLLY_DETAIL_CPUID_C(f16c, 29) + FOLLY_DETAIL_CPUID_C(rdrand, 30) +#undef FOLLY_DETAIL_CPUID_C +#define FOLLY_DETAIL_CPUID_D(name, bit) FOLLY_DETAIL_CPUID_X(name, f1d_, bit) + FOLLY_DETAIL_CPUID_D(fpu, 0) + FOLLY_DETAIL_CPUID_D(vme, 1) + FOLLY_DETAIL_CPUID_D(de, 2) + FOLLY_DETAIL_CPUID_D(pse, 3) + FOLLY_DETAIL_CPUID_D(tsc, 4) + FOLLY_DETAIL_CPUID_D(msr, 5) + FOLLY_DETAIL_CPUID_D(pae, 6) + FOLLY_DETAIL_CPUID_D(mce, 7) + FOLLY_DETAIL_CPUID_D(cx8, 8) + FOLLY_DETAIL_CPUID_D(apic, 9) + FOLLY_DETAIL_CPUID_D(sep, 11) + FOLLY_DETAIL_CPUID_D(mtrr, 12) + FOLLY_DETAIL_CPUID_D(pge, 13) + FOLLY_DETAIL_CPUID_D(mca, 14) + FOLLY_DETAIL_CPUID_D(cmov, 15) + FOLLY_DETAIL_CPUID_D(pat, 16) + FOLLY_DETAIL_CPUID_D(pse36, 17) + FOLLY_DETAIL_CPUID_D(psn, 18) + FOLLY_DETAIL_CPUID_D(clfsh, 19) + FOLLY_DETAIL_CPUID_D(ds, 21) + FOLLY_DETAIL_CPUID_D(acpi, 22) + FOLLY_DETAIL_CPUID_D(mmx, 23) + FOLLY_DETAIL_CPUID_D(fxsr, 24) + FOLLY_DETAIL_CPUID_D(sse, 25) + FOLLY_DETAIL_CPUID_D(sse2, 26) + FOLLY_DETAIL_CPUID_D(ss, 27) + 
FOLLY_DETAIL_CPUID_D(htt, 28) + FOLLY_DETAIL_CPUID_D(tm, 29) + FOLLY_DETAIL_CPUID_D(pbe, 31) +#undef FOLLY_DETAIL_CPUID_D + + // cpuid(7): Extended Features. +#define FOLLY_DETAIL_CPUID_B(name, bit) FOLLY_DETAIL_CPUID_X(name, f7b_, bit) + FOLLY_DETAIL_CPUID_B(bmi1, 3) + FOLLY_DETAIL_CPUID_B(hle, 4) + FOLLY_DETAIL_CPUID_B(avx2, 5) + FOLLY_DETAIL_CPUID_B(smep, 7) + FOLLY_DETAIL_CPUID_B(bmi2, 8) + FOLLY_DETAIL_CPUID_B(erms, 9) + FOLLY_DETAIL_CPUID_B(invpcid, 10) + FOLLY_DETAIL_CPUID_B(rtm, 11) + FOLLY_DETAIL_CPUID_B(mpx, 14) + FOLLY_DETAIL_CPUID_B(avx512f, 16) + FOLLY_DETAIL_CPUID_B(avx512dq, 17) + FOLLY_DETAIL_CPUID_B(rdseed, 18) + FOLLY_DETAIL_CPUID_B(adx, 19) + FOLLY_DETAIL_CPUID_B(smap, 20) + FOLLY_DETAIL_CPUID_B(avx512ifma, 21) + FOLLY_DETAIL_CPUID_B(pcommit, 22) + FOLLY_DETAIL_CPUID_B(clflushopt, 23) + FOLLY_DETAIL_CPUID_B(clwb, 24) + FOLLY_DETAIL_CPUID_B(avx512pf, 26) + FOLLY_DETAIL_CPUID_B(avx512er, 27) + FOLLY_DETAIL_CPUID_B(avx512cd, 28) + FOLLY_DETAIL_CPUID_B(sha, 29) + FOLLY_DETAIL_CPUID_B(avx512bw, 30) + FOLLY_DETAIL_CPUID_B(avx512vl, 31) +#undef FOLLY_DETAIL_CPUID_B +#define FOLLY_DETAIL_CPUID_C(name, bit) FOLLY_DETAIL_CPUID_X(name, f7c_, bit) + FOLLY_DETAIL_CPUID_C(prefetchwt1, 0) + FOLLY_DETAIL_CPUID_C(avx512vbmi, 1) +#undef FOLLY_DETAIL_CPUID_C + +#undef FOLLY_DETAIL_CPUID_X + + private: + uint32_t f1c_ = 0; + uint32_t f1d_ = 0; + uint32_t f7b_ = 0; + uint32_t f7c_ = 0; +}; + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/DefaultKeepAliveExecutor.h b/native/iosTest/Pods/Folly/folly/DefaultKeepAliveExecutor.h new file mode 100644 index 000000000..c5d7f6065 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/DefaultKeepAliveExecutor.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +#include +#include + +namespace folly { + +/// An Executor accepts units of work with add(), which should be +/// threadsafe. +class DefaultKeepAliveExecutor : public virtual Executor { + public: + DefaultKeepAliveExecutor() : Executor() {} + + virtual ~DefaultKeepAliveExecutor() { + DCHECK(!keepAlive_); + } + + folly::Executor::KeepAlive<> weakRef() { + return WeakRef::create(controlBlock_, this); + } + + protected: + void joinKeepAlive() { + DCHECK(keepAlive_); + keepAlive_.reset(); + keepAliveReleaseBaton_.wait(); + } + + void joinAndResetKeepAlive() { + joinKeepAlive(); + auto keepAliveCount = + controlBlock_->keepAliveCount_.exchange(1, std::memory_order_relaxed); + DCHECK_EQ(keepAliveCount, 0); + keepAliveReleaseBaton_.reset(); + keepAlive_ = makeKeepAlive(this); + } + + private: + struct ControlBlock { + std::atomic keepAliveCount_{1}; + }; + + class WeakRef : public Executor { + public: + static folly::Executor::KeepAlive<> create( + std::shared_ptr controlBlock, + Executor* executor) { + return makeKeepAlive(new WeakRef(std::move(controlBlock), executor)); + } + + void add(Func f) override { + if (auto executor = lock()) { + executor->add(std::move(f)); + } + } + + void addWithPriority(Func f, int8_t priority) override { + if (auto executor = lock()) { + executor->addWithPriority(std::move(f), priority); + } + } + + virtual uint8_t getNumPriorities() const override { + return numPriorities_; + } + + private: + WeakRef(std::shared_ptr controlBlock, Executor* executor) + : 
controlBlock_(std::move(controlBlock)), + executor_(executor), + numPriorities_(executor->getNumPriorities()) {} + + bool keepAliveAcquire() override { + auto keepAliveCount = + keepAliveCount_.fetch_add(1, std::memory_order_relaxed); + // We should never increment from 0 + DCHECK(keepAliveCount > 0); + return true; + } + + void keepAliveRelease() override { + auto keepAliveCount = + keepAliveCount_.fetch_sub(1, std::memory_order_acq_rel); + DCHECK(keepAliveCount >= 1); + + if (keepAliveCount == 1) { + delete this; + } + } + + folly::Executor::KeepAlive<> lock() { + auto controlBlock = + controlBlock_->keepAliveCount_.load(std::memory_order_relaxed); + do { + if (controlBlock == 0) { + return {}; + } + } while (!controlBlock_->keepAliveCount_.compare_exchange_weak( + controlBlock, + controlBlock + 1, + std::memory_order_release, + std::memory_order_relaxed)); + + return makeKeepAlive(executor_); + } + + std::atomic keepAliveCount_{1}; + + std::shared_ptr controlBlock_; + Executor* executor_; + + uint8_t numPriorities_; + }; + + bool keepAliveAcquire() override { + auto keepAliveCount = + controlBlock_->keepAliveCount_.fetch_add(1, std::memory_order_relaxed); + // We should never increment from 0 + DCHECK(keepAliveCount > 0); + return true; + } + + void keepAliveRelease() override { + auto keepAliveCount = + controlBlock_->keepAliveCount_.fetch_sub(1, std::memory_order_acquire); + DCHECK(keepAliveCount >= 1); + + if (keepAliveCount == 1) { + keepAliveReleaseBaton_.post(); // std::memory_order_release + } + } + + std::shared_ptr controlBlock_{std::make_shared()}; + Baton<> keepAliveReleaseBaton_; + KeepAlive keepAlive_{makeKeepAlive(this)}; +}; + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/Demangle.cpp b/native/iosTest/Pods/Folly/folly/Demangle.cpp new file mode 100644 index 000000000..0df902c00 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Demangle.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include + +#include +#include +#include + +#if FOLLY_DETAIL_HAVE_DEMANGLE_H + +#include + +#endif + +namespace folly { + +#if FOLLY_DETAIL_HAVE_DEMANGLE_H + +fbstring demangle(const char* name) { + if (!name) { + return fbstring(); + } +#ifdef FOLLY_DEMANGLE_MAX_SYMBOL_SIZE + // GCC's __cxa_demangle() uses on-stack data structures for the + // parser state which are linear in the number of components of the + // symbol. For extremely long symbols, this can cause a stack + // overflow. We set an arbitrary symbol length limit above which we + // just return the mangled name. 
+ size_t mangledLen = strlen(name); + if (mangledLen > FOLLY_DEMANGLE_MAX_SYMBOL_SIZE) { + return fbstring(name, mangledLen); + } +#endif + + int status; + size_t len = 0; + // malloc() memory for the demangled type name + char* demangled = abi::__cxa_demangle(name, nullptr, &len, &status); + if (status != 0) { + return name; + } + // len is the length of the buffer (including NUL terminator and maybe + // other junk) + return fbstring(demangled, strlen(demangled), len, AcquireMallocatedString()); +} + +namespace { + +struct DemangleBuf { + char* dest; + size_t remaining; + size_t total; +}; + +void demangleCallback(const char* str, size_t size, void* p) { + DemangleBuf* buf = static_cast(p); + size_t n = std::min(buf->remaining, size); + memcpy(buf->dest, str, n); + buf->dest += n; + buf->remaining -= n; + buf->total += size; +} + +} // namespace + +size_t demangle(const char* name, char* out, size_t outSize) { +#ifdef FOLLY_DEMANGLE_MAX_SYMBOL_SIZE + size_t mangledLen = strlen(name); + if (mangledLen > FOLLY_DEMANGLE_MAX_SYMBOL_SIZE) { + if (outSize) { + size_t n = std::min(mangledLen, outSize - 1); + memcpy(out, name, n); + out[n] = '\0'; + } + return mangledLen; + } +#endif + + DemangleBuf dbuf; + dbuf.dest = out; + dbuf.remaining = outSize ? 
outSize - 1 : 0; // leave room for null term + dbuf.total = 0; + + // Unlike most library functions, this returns 1 on success and 0 on failure + int status = + detail::cplus_demangle_v3_callback_wrapper(name, demangleCallback, &dbuf); + if (status == 0) { // failed, return original + return folly::strlcpy(out, name, outSize); + } + if (outSize != 0) { + *dbuf.dest = '\0'; + } + return dbuf.total; +} + +#else + +fbstring demangle(const char* name) { + return name; +} + +size_t demangle(const char* name, char* out, size_t outSize) { + return folly::strlcpy(out, name, outSize); +} + +#endif + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/Demangle.h b/native/iosTest/Pods/Folly/folly/Demangle.h new file mode 100644 index 000000000..da13fb178 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Demangle.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +namespace folly { + +/** + * Return the demangled (prettyfied) version of a C++ type. + * + * This function tries to produce a human-readable type, but the type name will + * be returned unchanged in case of error or if demangling isn't supported on + * your system. + * + * Use for debugging -- do not rely on demangle() returning anything useful. + * + * This function may allocate memory (and therefore throw std::bad_alloc). 
+ */ +fbstring demangle(const char* name); +inline fbstring demangle(const std::type_info& type) { + return demangle(type.name()); +} + +/** + * Return the demangled (prettyfied) version of a C++ type in a user-provided + * buffer. + * + * The semantics are the same as for snprintf or strlcpy: bufSize is the size + * of the buffer, the string is always null-terminated, and the return value is + * the number of characters (not including the null terminator) that would have + * been written if the buffer was big enough. (So a return value >= bufSize + * indicates that the output was truncated) + * + * This function does not allocate memory and is async-signal-safe. + * + * Note that the underlying function for the fbstring-returning demangle is + * somewhat standard (abi::__cxa_demangle, which uses malloc), the underlying + * function for this version is less so (cplus_demangle_v3_callback from + * libiberty), so it is possible for the fbstring version to work, while this + * version returns the original, mangled name. + */ +size_t demangle(const char* name, char* out, size_t outSize); +inline size_t demangle(const std::type_info& type, char* buf, size_t bufSize) { + return demangle(type.name(), buf, bufSize); +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/DiscriminatedPtr.h b/native/iosTest/Pods/Folly/folly/DiscriminatedPtr.h new file mode 100644 index 000000000..0f49bd688 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/DiscriminatedPtr.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Discriminated pointer: Type-safe pointer to one of several types. + * + * Similar to boost::variant, but has no space overhead over a raw pointer, as + * it relies on the fact that (on x86_64) there are 16 unused bits in a + * pointer. + * + * @author Tudor Bosman (tudorb@fb.com) + */ + +#pragma once + +#include +#include + +#include + +#include +#include +#include + +#if !FOLLY_X64 && !FOLLY_AARCH64 && !FOLLY_PPC64 +#error "DiscriminatedPtr is x64, arm64 and ppc64 specific code." +#endif + +namespace folly { + +/** + * Discriminated pointer. + * + * Given a list of types, a DiscriminatedPtr may point to an object + * of one of the given types, or may be empty. DiscriminatedPtr is type-safe: + * you may only get a pointer to the type that you put in, otherwise get + * throws an exception (and get_nothrow returns nullptr) + * + * This pointer does not do any kind of lifetime management -- it's not a + * "smart" pointer. You are responsible for deallocating any memory used + * to hold pointees, if necessary. + */ +template +class DiscriminatedPtr { + // <, not <=, as our indexes are 1-based (0 means "empty") + static_assert( + sizeof...(Types) < std::numeric_limits::max(), + "too many types"); + + public: + /** + * Create an empty DiscriminatedPtr. + */ + DiscriminatedPtr() : data_(0) {} + + /** + * Create a DiscriminatedPtr that points to an object of type T. 
+ * Fails at compile time if T is not a valid type (listed in Types) + */ + template + explicit DiscriminatedPtr(T* ptr) { + set(ptr, typeIndex()); + } + + /** + * Set this DiscriminatedPtr to point to an object of type T. + * Fails at compile time if T is not a valid type (listed in Types) + */ + template + void set(T* ptr) { + set(ptr, typeIndex()); + } + + /** + * Get a pointer to the object that this DiscriminatedPtr points to, if it is + * of type T. Fails at compile time if T is not a valid type (listed in + * Types), and returns nullptr if this DiscriminatedPtr is empty or points to + * an object of a different type. + */ + template + T* get_nothrow() noexcept { + void* p = LIKELY(hasType()) ? ptr() : nullptr; + return static_cast(p); + } + + template + const T* get_nothrow() const noexcept { + const void* p = LIKELY(hasType()) ? ptr() : nullptr; + return static_cast(p); + } + + /** + * Get a pointer to the object that this DiscriminatedPtr points to, if it is + * of type T. Fails at compile time if T is not a valid type (listed in + * Types), and throws std::invalid_argument if this DiscriminatedPtr is empty + * or points to an object of a different type. + */ + template + T* get() { + if (UNLIKELY(!hasType())) { + throw std::invalid_argument("Invalid type"); + } + return static_cast(ptr()); + } + + template + const T* get() const { + if (UNLIKELY(!hasType())) { + throw std::invalid_argument("Invalid type"); + } + return static_cast(ptr()); + } + + /** + * Return true iff this DiscriminatedPtr is empty. + */ + bool empty() const { + return index() == 0; + } + + /** + * Return true iff the object pointed by this DiscriminatedPtr has type T, + * false otherwise. Fails at compile time if T is not a valid type (listed + * in Types...) + */ + template + bool hasType() const { + return index() == typeIndex(); + } + + /** + * Clear this DiscriminatedPtr, making it empty. 
+ */ + void clear() { + data_ = 0; + } + + /** + * Assignment operator from a pointer of type T. + */ + template + DiscriminatedPtr& operator=(T* ptr) { + set(ptr); + return *this; + } + + /** + * Apply a visitor to this object, calling the appropriate overload for + * the type currently stored in DiscriminatedPtr. Throws invalid_argument + * if the DiscriminatedPtr is empty. + * + * The visitor must meet the following requirements: + * + * - The visitor must allow invocation as a function by overloading + * operator(), unambiguously accepting all values of type T* (or const T*) + * for all T in Types... + * - All operations of the function object on T* (or const T*) must + * return the same type (or a static_assert will fire). + */ + template + typename dptr_detail::VisitorResult::type apply(V&& visitor) { + size_t n = index(); + if (n == 0) { + throw std::invalid_argument("Empty DiscriminatedPtr"); + } + return dptr_detail::ApplyVisitor()( + n, std::forward(visitor), ptr()); + } + + template + typename dptr_detail::ConstVisitorResult::type apply( + V&& visitor) const { + size_t n = index(); + if (n == 0) { + throw std::invalid_argument("Empty DiscriminatedPtr"); + } + return dptr_detail::ApplyConstVisitor()( + n, std::forward(visitor), ptr()); + } + + private: + /** + * Get the 1-based type index of T in Types. + */ + template + uint16_t typeIndex() const { + return uint16_t(dptr_detail::GetTypeIndex::value); + } + + uint16_t index() const { + return data_ >> 48; + } + void* ptr() const { + return reinterpret_cast(data_ & ((1ULL << 48) - 1)); + } + + void set(void* p, uint16_t v) { + uintptr_t ip = reinterpret_cast(p); + CHECK(!(ip >> 48)); + ip |= static_cast(v) << 48; + data_ = ip; + } + + /** + * We store a pointer in the least significant 48 bits of data_, and a type + * index (0 = empty, or 1-based index in Types) in the most significant 16 + * bits. We rely on the fact that pointers have their most significant 16 + * bits clear on x86_64. 
+ */ + uintptr_t data_; +}; + +template +decltype(auto) apply_visitor( + Visitor&& visitor, + const DiscriminatedPtr& variant) { + return variant.apply(std::forward(visitor)); +} + +template +decltype(auto) apply_visitor( + Visitor&& visitor, + DiscriminatedPtr& variant) { + return variant.apply(std::forward(visitor)); +} + +template +decltype(auto) apply_visitor( + Visitor&& visitor, + DiscriminatedPtr&& variant) { + return variant.apply(std::forward(visitor)); +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/DynamicConverter.h b/native/iosTest/Pods/Folly/folly/DynamicConverter.h new file mode 100644 index 000000000..04aec594e --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/DynamicConverter.h @@ -0,0 +1,415 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @author Nicholas Ormrod + +#pragma once + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace folly { +template +T convertTo(const dynamic&); +template +dynamic toDynamic(const T&); +} // namespace folly + +/** + * convertTo returns a well-typed representation of the input dynamic. 
+ * + * Example: + * + * dynamic d = dynamic::array( + * dynamic::array(1, 2, 3), + * dynamic::array(4, 5)); // a vector of vector of int + * auto vvi = convertTo>>(d); + * + * See docs/DynamicConverter.md for supported types and customization + */ + +namespace folly { + +/////////////////////////////////////////////////////////////////////////////// +// traits + +namespace dynamicconverter_detail { + +BOOST_MPL_HAS_XXX_TRAIT_DEF(value_type) +BOOST_MPL_HAS_XXX_TRAIT_DEF(iterator) +BOOST_MPL_HAS_XXX_TRAIT_DEF(mapped_type) +BOOST_MPL_HAS_XXX_TRAIT_DEF(key_type) + +template +struct iterator_class_is_container { + typedef std::reverse_iterator some_iterator; + enum { + value = has_value_type::value && + std::is_constructible::value + }; +}; + +template +using class_is_container = + Conjunction, iterator_class_is_container>; + +template +using is_range = StrictConjunction, has_iterator>; + +template +using is_container = StrictConjunction, class_is_container>; + +template +using is_map = StrictConjunction, has_mapped_type>; + +template +using is_associative = StrictConjunction, has_key_type>; + +} // namespace dynamicconverter_detail + +/////////////////////////////////////////////////////////////////////////////// +// custom iterators + +/** + * We have iterators that dereference to dynamics, but need iterators + * that dereference to typename T. + * + * Implementation details: + * 1. We cache the value of the dereference operator. This is necessary + * because boost::iterator_adaptor requires *it to return a + * reference. + * 2. For const reasons, we cannot call operator= to refresh the + * cache: we must call the destructor then placement new. 
+ */ + +namespace dynamicconverter_detail { + +template +struct Dereferencer { + static inline void derefToCache( + Optional* /* mem */, + const dynamic::const_item_iterator& /* it */) { + throw_exception("array", dynamic::Type::OBJECT); + } + + static inline void derefToCache( + Optional* mem, + const dynamic::const_iterator& it) { + mem->emplace(convertTo(*it)); + } +}; + +template +struct Dereferencer> { + static inline void derefToCache( + Optional>* mem, + const dynamic::const_item_iterator& it) { + mem->emplace(convertTo(it->first), convertTo(it->second)); + } + + // Intentional duplication of the code in Dereferencer + template + static inline void derefToCache( + Optional* mem, + const dynamic::const_iterator& it) { + mem->emplace(convertTo(*it)); + } +}; + +template +class Transformer + : public boost:: + iterator_adaptor, It, typename T::value_type> { + friend class boost::iterator_core_access; + + typedef typename T::value_type ttype; + + mutable Optional cache_; + + void increment() { + ++this->base_reference(); + cache_ = none; + } + + ttype& dereference() const { + if (!cache_) { + Dereferencer::derefToCache(&cache_, this->base_reference()); + } + return cache_.value(); + } + + public: + explicit Transformer(const It& it) : Transformer::iterator_adaptor_(it) {} +}; + +// conversion factory +template +inline std::move_iterator> conversionIterator(const It& it) { + return std::make_move_iterator(Transformer(it)); +} + +} // namespace dynamicconverter_detail + +/////////////////////////////////////////////////////////////////////////////// +// DynamicConverter specializations + +/** + * Each specialization of DynamicConverter has the function + * 'static T convert(const dynamic&);' + */ + +// default - intentionally unimplemented +template +struct DynamicConverter; + +// boolean +template <> +struct DynamicConverter { + static bool convert(const dynamic& d) { + return d.asBool(); + } +}; + +// integrals +template +struct DynamicConverter< + T, + typename 
std::enable_if< + std::is_integral::value && !std::is_same::value>::type> { + static T convert(const dynamic& d) { + return folly::to(d.asInt()); + } +}; + +// enums +template +struct DynamicConverter< + T, + typename std::enable_if::value>::type> { + static T convert(const dynamic& d) { + using type = typename std::underlying_type::type; + return static_cast(DynamicConverter::convert(d)); + } +}; + +// floating point +template +struct DynamicConverter< + T, + typename std::enable_if::value>::type> { + static T convert(const dynamic& d) { + return folly::to(d.asDouble()); + } +}; + +// fbstring +template <> +struct DynamicConverter { + static folly::fbstring convert(const dynamic& d) { + return d.asString(); + } +}; + +// std::string +template <> +struct DynamicConverter { + static std::string convert(const dynamic& d) { + return d.asString(); + } +}; + +// std::pair +template +struct DynamicConverter> { + static std::pair convert(const dynamic& d) { + if (d.isArray() && d.size() == 2) { + return std::make_pair(convertTo(d[0]), convertTo(d[1])); + } else if (d.isObject() && d.size() == 1) { + auto it = d.items().begin(); + return std::make_pair(convertTo(it->first), convertTo(it->second)); + } else { + throw_exception("array (size 2) or object (size 1)", d.type()); + } + } +}; + +// non-associative containers +template +struct DynamicConverter< + C, + typename std::enable_if< + dynamicconverter_detail::is_container::value && + !dynamicconverter_detail::is_associative::value>::type> { + static C convert(const dynamic& d) { + if (d.isArray()) { + return C( + dynamicconverter_detail::conversionIterator(d.begin()), + dynamicconverter_detail::conversionIterator(d.end())); + } else if (d.isObject()) { + return C( + dynamicconverter_detail::conversionIterator(d.items().begin()), + dynamicconverter_detail::conversionIterator(d.items().end())); + } else { + throw_exception("object or array", d.type()); + } + } +}; + +// associative containers +template +struct 
DynamicConverter< + C, + typename std::enable_if< + dynamicconverter_detail::is_container::value && + dynamicconverter_detail::is_associative::value>::type> { + static C convert(const dynamic& d) { + C ret; // avoid direct initialization due to unordered_map's constructor + // causing memory corruption if the iterator throws an exception + if (d.isArray()) { + ret.insert( + dynamicconverter_detail::conversionIterator(d.begin()), + dynamicconverter_detail::conversionIterator(d.end())); + } else if (d.isObject()) { + ret.insert( + dynamicconverter_detail::conversionIterator(d.items().begin()), + dynamicconverter_detail::conversionIterator(d.items().end())); + } else { + throw_exception("object or array", d.type()); + } + return ret; + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// DynamicConstructor specializations + +/** + * Each specialization of DynamicConstructor has the function + * 'static dynamic construct(const C&);' + */ + +// default +template +struct DynamicConstructor { + static dynamic construct(const C& x) { + return dynamic(x); + } +}; + +// identity +template +struct DynamicConstructor< + C, + typename std::enable_if::value>::type> { + static dynamic construct(const C& x) { + return x; + } +}; + +// enums +template +struct DynamicConstructor< + C, + typename std::enable_if::value>::type> { + static dynamic construct(const C& x) { + return dynamic(to_underlying(x)); + } +}; + +// maps +template +struct DynamicConstructor< + C, + typename std::enable_if< + !std::is_same::value && + dynamicconverter_detail::is_map::value>::type> { + static dynamic construct(const C& x) { + dynamic d = dynamic::object; + for (const auto& pair : x) { + d.insert(toDynamic(pair.first), toDynamic(pair.second)); + } + return d; + } +}; + +// other ranges +template +struct DynamicConstructor< + C, + typename std::enable_if< + !std::is_same::value && + !dynamicconverter_detail::is_map::value && + !std::is_constructible::value && + 
dynamicconverter_detail::is_range::value>::type> { + static dynamic construct(const C& x) { + dynamic d = dynamic::array; + for (const auto& item : x) { + d.push_back(toDynamic(item)); + } + return d; + } +}; + +// pair +template +struct DynamicConstructor, void> { + static dynamic construct(const std::pair& x) { + dynamic d = dynamic::array; + d.push_back(toDynamic(x.first)); + d.push_back(toDynamic(x.second)); + return d; + } +}; + +// vector +template <> +struct DynamicConstructor, void> { + static dynamic construct(const std::vector& x) { + dynamic d = dynamic::array; + // Intentionally specifying the type as bool here. + // std::vector's iterators return a proxy which is a prvalue + // and hence cannot bind to an lvalue reference such as auto& + for (bool item : x) { + d.push_back(toDynamic(item)); + } + return d; + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// implementation + +template +T convertTo(const dynamic& d) { + return DynamicConverter::type>::convert(d); +} + +template +dynamic toDynamic(const T& x) { + return DynamicConstructor::type>::construct(x); +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/Exception.h b/native/iosTest/Pods/Folly/folly/Exception.h new file mode 100644 index 000000000..b050d641d --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/Exception.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace folly { + +// Various helpers to throw appropriate std::system_error exceptions from C +// library errors (returned in errno, as positive return values (many POSIX +// functions), or as negative return values (Linux syscalls)) +// +// The *Explicit functions take an explicit value for errno. + +inline std::system_error makeSystemErrorExplicit(int err, const char* msg) { + // TODO: The C++ standard indicates that std::generic_category() should be + // used for POSIX errno codes. + // + // We should ideally change this to use std::generic_category() instead of + // std::system_category(). However, undertaking this change will require + // updating existing call sites that currently catch exceptions thrown by + // this code and currently expect std::system_category. + return std::system_error(err, std::system_category(), msg); +} + +template +std::system_error makeSystemErrorExplicit(int err, Args&&... args) { + return makeSystemErrorExplicit( + err, to(std::forward(args)...).c_str()); +} + +inline std::system_error makeSystemError(const char* msg) { + return makeSystemErrorExplicit(errno, msg); +} + +template +std::system_error makeSystemError(Args&&... args) { + return makeSystemErrorExplicit(errno, std::forward(args)...); +} + +// Helper to throw std::system_error +[[noreturn]] inline void throwSystemErrorExplicit(int err, const char* msg) { + throw_exception(makeSystemErrorExplicit(err, msg)); +} + +template +[[noreturn]] void throwSystemErrorExplicit(int err, Args&&... args) { + throw_exception(makeSystemErrorExplicit(err, std::forward(args)...)); +} + +// Helper to throw std::system_error from errno and components of a string +template +[[noreturn]] void throwSystemError(Args&&... 
args) { + throwSystemErrorExplicit(errno, std::forward(args)...); +} + +// Check a Posix return code (0 on success, error number on error), throw +// on error. +template +void checkPosixError(int err, Args&&... args) { + if (UNLIKELY(err != 0)) { + throwSystemErrorExplicit(err, std::forward(args)...); + } +} + +// Check a Linux kernel-style return code (>= 0 on success, negative error +// number on error), throw on error. +template +void checkKernelError(ssize_t ret, Args&&... args) { + if (UNLIKELY(ret < 0)) { + throwSystemErrorExplicit(int(-ret), std::forward(args)...); + } +} + +// Check a traditional Unix return code (-1 and sets errno on error), throw +// on error. +template +void checkUnixError(ssize_t ret, Args&&... args) { + if (UNLIKELY(ret == -1)) { + throwSystemError(std::forward(args)...); + } +} + +template +void checkUnixErrorExplicit(ssize_t ret, int savedErrno, Args&&... args) { + if (UNLIKELY(ret == -1)) { + throwSystemErrorExplicit(savedErrno, std::forward(args)...); + } +} + +// Check the return code from a fopen-style function (returns a non-nullptr +// FILE* on success, nullptr on error, sets errno). Works with fopen, fdopen, +// freopen, tmpfile, etc. +template +void checkFopenError(FILE* fp, Args&&... args) { + if (UNLIKELY(!fp)) { + throwSystemError(std::forward(args)...); + } +} + +template +void checkFopenErrorExplicit(FILE* fp, int savedErrno, Args&&... args) { + if (UNLIKELY(!fp)) { + throwSystemErrorExplicit(savedErrno, std::forward(args)...); + } +} + +/** + * If cond is not true, raise an exception of type E. E must have a ctor that + * works with const char* (a description of the failure). 
+ */ +#define CHECK_THROW(cond, E) \ + do { \ + if (!(cond)) { \ + folly::throw_exception("Check failed: " #cond); \ + } \ + } while (0) + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ExceptionString.h b/native/iosTest/Pods/Folly/folly/ExceptionString.h new file mode 100644 index 000000000..13042ead4 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ExceptionString.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +namespace folly { + +/** + * Debug string for an exception: include type and what(), if + * defined. 
+ */ +inline fbstring exceptionStr(const std::exception& e) { +#if FOLLY_HAS_RTTI + fbstring rv(demangle(typeid(e))); + rv += ": "; +#else + fbstring rv("Exception (no RTTI available): "); +#endif + rv += e.what(); + return rv; +} + +inline fbstring exceptionStr(std::exception_ptr ep) { + if (!kHasExceptions) { + return "Exception (catch unavailable)"; + } + return catch_exception( + [&]() -> fbstring { + return catch_exception( + [&]() -> fbstring { std::rethrow_exception(ep); }, + [](auto&& e) { return exceptionStr(e); }); + }, + []() -> fbstring { return ""; }); +} + +template +auto exceptionStr(const E& e) -> typename std:: + enable_if::value, fbstring>::type { +#if FOLLY_HAS_RTTI + return demangle(typeid(e)); +#else + (void)e; + return "Exception (no RTTI available)"; +#endif +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ExceptionWrapper-inl.h b/native/iosTest/Pods/Folly/folly/ExceptionWrapper-inl.h new file mode 100644 index 000000000..16fb5d4b6 --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ExceptionWrapper-inl.h @@ -0,0 +1,678 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * + * Author: Eric Niebler + */ + +#include + +namespace folly { + +template +struct exception_wrapper::arg_type_ + : public arg_type_ {}; +template +struct exception_wrapper::arg_type_ { + using type = Arg; +}; +template +struct exception_wrapper::arg_type_ { + using type = Arg; +}; +template +struct exception_wrapper::arg_type_ { + using type = Arg; +}; +template +struct exception_wrapper::arg_type_ { + using type = Arg; +}; +template +struct exception_wrapper::arg_type_ { + using type = AnyException; +}; +template +struct exception_wrapper::arg_type_ { + using type = AnyException; +}; +template +struct exception_wrapper::arg_type_ { + using type = AnyException; +}; +template +struct exception_wrapper::arg_type_ { + using type = AnyException; +}; + +template +inline Ret exception_wrapper::noop_(Args...) { + return Ret(); +} + +inline std::type_info const* exception_wrapper::uninit_type_( + exception_wrapper const*) { + return &typeid(void); +} + +template +inline exception_wrapper::Buffer::Buffer(in_place_type_t, As&&... 
as_) { + ::new (static_cast(&buff_)) Ex(std::forward(as_)...); +} + +template +inline Ex& exception_wrapper::Buffer::as() noexcept { + return *static_cast(static_cast(&buff_)); +} +template +inline Ex const& exception_wrapper::Buffer::as() const noexcept { + return *static_cast(static_cast(&buff_)); +} + +inline std::exception const* exception_wrapper::as_exception_or_null_( + std::exception const& ex) { + return &ex; +} +inline std::exception const* exception_wrapper::as_exception_or_null_( + AnyException) { + return nullptr; +} + +static_assert( + !kMicrosoftAbiVer || (kMicrosoftAbiVer >= 1900 && kMicrosoftAbiVer <= 2000), + "exception_wrapper is untested and possibly broken on your version of " + "MSVC"); + +inline std::uintptr_t exception_wrapper::ExceptionPtr::as_int_( + std::exception_ptr const& ptr, + std::exception const& e) noexcept { + if (!kMicrosoftAbiVer) { + return reinterpret_cast(&e); + } else { + // On Windows, as of MSVC2017, all thrown exceptions are copied to the stack + // first. Thus, we cannot depend on exception references associated with an + // exception_ptr to be live for the duration of the exception_ptr. We need + // to directly access the heap allocated memory inside the exception_ptr. + // + // std::exception_ptr is an opaque reinterpret_cast of + // std::shared_ptr<__ExceptionPtr> + // __ExceptionPtr is a non-virtual class with two members, a union and a + // bool. The union contains the now-undocumented EHExceptionRecord, which + // contains a struct which contains a void* which points to the heap + // allocated exception. + // We derive the offset to pExceptionObject via manual means. 
+ FOLLY_PACK_PUSH + struct Win32ExceptionPtr { + char offset[8 + 4 * sizeof(void*)]; + void* exceptionObject; + } FOLLY_PACK_ATTR; + FOLLY_PACK_POP + + auto* win32ExceptionPtr = + reinterpret_cast const*>(&ptr) + ->get(); + return reinterpret_cast(win32ExceptionPtr->exceptionObject); + } +} +inline std::uintptr_t exception_wrapper::ExceptionPtr::as_int_( + std::exception_ptr const&, + AnyException e) noexcept { + return reinterpret_cast(e.typeinfo_) + 1; +} +inline bool exception_wrapper::ExceptionPtr::has_exception_() const { + return 0 == exception_or_type_ % 2; +} +inline std::exception const* exception_wrapper::ExceptionPtr::as_exception_() + const { + return reinterpret_cast(exception_or_type_); +} +inline std::type_info const* exception_wrapper::ExceptionPtr::as_type_() const { + return reinterpret_cast(exception_or_type_ - 1); +} + +inline void exception_wrapper::ExceptionPtr::copy_( + exception_wrapper const* from, + exception_wrapper* to) { + ::new (static_cast(&to->eptr_)) ExceptionPtr(from->eptr_); +} +inline void exception_wrapper::ExceptionPtr::move_( + exception_wrapper* from, + exception_wrapper* to) { + ::new (static_cast(&to->eptr_)) ExceptionPtr(std::move(from->eptr_)); + delete_(from); +} +inline void exception_wrapper::ExceptionPtr::delete_(exception_wrapper* that) { + that->eptr_.~ExceptionPtr(); + that->vptr_ = &uninit_; +} +[[noreturn]] inline void exception_wrapper::ExceptionPtr::throw_( + exception_wrapper const* that) { + std::rethrow_exception(that->eptr_.ptr_); +} +inline std::type_info const* exception_wrapper::ExceptionPtr::type_( + exception_wrapper const* that) { + if (auto e = get_exception_(that)) { + return &typeid(*e); + } + return that->eptr_.as_type_(); +} +inline std::exception const* exception_wrapper::ExceptionPtr::get_exception_( + exception_wrapper const* that) { + return that->eptr_.has_exception_() ? 
that->eptr_.as_exception_() : nullptr; +} +inline exception_wrapper exception_wrapper::ExceptionPtr::get_exception_ptr_( + exception_wrapper const* that) { + return *that; +} + +template +inline void exception_wrapper::InPlace::copy_( + exception_wrapper const* from, + exception_wrapper* to) { + ::new (static_cast(std::addressof(to->buff_.as()))) + Ex(from->buff_.as()); +} +template +inline void exception_wrapper::InPlace::move_( + exception_wrapper* from, + exception_wrapper* to) { + ::new (static_cast(std::addressof(to->buff_.as()))) + Ex(std::move(from->buff_.as())); + delete_(from); +} +template +inline void exception_wrapper::InPlace::delete_(exception_wrapper* that) { + that->buff_.as().~Ex(); + that->vptr_ = &uninit_; +} +template +[[noreturn]] inline void exception_wrapper::InPlace::throw_( + exception_wrapper const* that) { + throw that->buff_.as(); +} +template +inline std::type_info const* exception_wrapper::InPlace::type_( + exception_wrapper const*) { + return &typeid(Ex); +} +template +inline std::exception const* exception_wrapper::InPlace::get_exception_( + exception_wrapper const* that) { + return as_exception_or_null_(that->buff_.as()); +} +template +inline exception_wrapper exception_wrapper::InPlace::get_exception_ptr_( + exception_wrapper const* that) { + try { + throw_(that); + } catch (Ex const& ex) { + return exception_wrapper{std::current_exception(), ex}; + } +} + +template +[[noreturn]] inline void exception_wrapper::SharedPtr::Impl::throw_() + const { + throw ex_; +} +template +inline std::exception const* +exception_wrapper::SharedPtr::Impl::get_exception_() const noexcept { + return as_exception_or_null_(ex_); +} +template +inline exception_wrapper +exception_wrapper::SharedPtr::Impl::get_exception_ptr_() const noexcept { + try { + throw_(); + } catch (Ex& ex) { + return exception_wrapper{std::current_exception(), ex}; + } +} +inline void exception_wrapper::SharedPtr::copy_( + exception_wrapper const* from, + exception_wrapper* to) { + 
::new (static_cast(std::addressof(to->sptr_))) SharedPtr(from->sptr_); +} +inline void exception_wrapper::SharedPtr::move_( + exception_wrapper* from, + exception_wrapper* to) { + ::new (static_cast(std::addressof(to->sptr_))) + SharedPtr(std::move(from->sptr_)); + delete_(from); +} +inline void exception_wrapper::SharedPtr::delete_(exception_wrapper* that) { + that->sptr_.~SharedPtr(); + that->vptr_ = &uninit_; +} +[[noreturn]] inline void exception_wrapper::SharedPtr::throw_( + exception_wrapper const* that) { + that->sptr_.ptr_->throw_(); + folly::assume_unreachable(); +} +inline std::type_info const* exception_wrapper::SharedPtr::type_( + exception_wrapper const* that) { + return that->sptr_.ptr_->info_; +} +inline std::exception const* exception_wrapper::SharedPtr::get_exception_( + exception_wrapper const* that) { + return that->sptr_.ptr_->get_exception_(); +} +inline exception_wrapper exception_wrapper::SharedPtr::get_exception_ptr_( + exception_wrapper const* that) { + return that->sptr_.ptr_->get_exception_ptr_(); +} + +template +inline exception_wrapper::exception_wrapper( + ThrownTag, + in_place_type_t, + As&&... as) + : eptr_{std::make_exception_ptr(Ex(std::forward(as)...)), + reinterpret_cast(std::addressof(typeid(Ex))) + 1u}, + vptr_(&ExceptionPtr::ops_) {} + +template +inline exception_wrapper::exception_wrapper( + OnHeapTag, + in_place_type_t, + As&&... as) + : sptr_{std::make_shared>(std::forward(as)...)}, + vptr_(&SharedPtr::ops_) {} + +template +inline exception_wrapper::exception_wrapper( + InSituTag, + in_place_type_t, + As&&... 
as) + : buff_{in_place_type, std::forward(as)...}, + vptr_(&InPlace::ops_) {} + +inline exception_wrapper::exception_wrapper(exception_wrapper&& that) noexcept + : exception_wrapper{} { + (vptr_ = that.vptr_)->move_(&that, this); // Move into *this, won't throw +} + +inline exception_wrapper::exception_wrapper( + exception_wrapper const& that) noexcept + : exception_wrapper{} { + that.vptr_->copy_(&that, this); // Copy into *this, won't throw + vptr_ = that.vptr_; +} + +// If `this == &that`, this move assignment operator leaves the object in a +// valid but unspecified state. +inline exception_wrapper& exception_wrapper::operator=( + exception_wrapper&& that) noexcept { + vptr_->delete_(this); // Free the current exception + (vptr_ = that.vptr_)->move_(&that, this); // Move into *this, won't throw + return *this; +} + +inline exception_wrapper& exception_wrapper::operator=( + exception_wrapper const& that) noexcept { + exception_wrapper(that).swap(*this); + return *this; +} + +inline exception_wrapper::~exception_wrapper() { + reset(); +} + +template +inline exception_wrapper::exception_wrapper( + std::exception_ptr ptr, + Ex& ex) noexcept + : eptr_{ptr, ExceptionPtr::as_int_(ptr, ex)}, vptr_(&ExceptionPtr::ops_) { + assert(eptr_.ptr_); +} + +namespace exception_wrapper_detail { +template +Ex&& dont_slice(Ex&& ex) { + assert(typeid(ex) == typeid(std::decay_t) || + !"Dynamic and static exception types don't match. 
Exception would " + "be sliced when storing in exception_wrapper."); + return std::forward(ex); +} +} // namespace exception_wrapper_detail + +template < + class Ex, + class Ex_, + FOLLY_REQUIRES_DEF(Conjunction< + exception_wrapper::IsStdException, + exception_wrapper::IsRegularExceptionType>::value)> +inline exception_wrapper::exception_wrapper(Ex&& ex) + : exception_wrapper{ + PlacementOf{}, + in_place_type, + exception_wrapper_detail::dont_slice(std::forward(ex))} {} + +template < + class Ex, + class Ex_, + FOLLY_REQUIRES_DEF(exception_wrapper::IsRegularExceptionType::value)> +inline exception_wrapper::exception_wrapper(in_place_t, Ex&& ex) + : exception_wrapper{ + PlacementOf{}, + in_place_type, + exception_wrapper_detail::dont_slice(std::forward(ex))} {} + +template < + class Ex, + typename... As, + FOLLY_REQUIRES_DEF(exception_wrapper::IsRegularExceptionType::value)> +inline exception_wrapper::exception_wrapper(in_place_type_t, As&&... as) + : exception_wrapper{PlacementOf{}, + in_place_type, + std::forward(as)...} {} + +inline void exception_wrapper::swap(exception_wrapper& that) noexcept { + exception_wrapper tmp(std::move(that)); + that = std::move(*this); + *this = std::move(tmp); +} + +inline exception_wrapper::operator bool() const noexcept { + return vptr_ != &uninit_; +} + +inline bool exception_wrapper::operator!() const noexcept { + return !static_cast(*this); +} + +inline void exception_wrapper::reset() { + vptr_->delete_(this); +} + +inline bool exception_wrapper::has_exception_ptr() const noexcept { + return vptr_ == &ExceptionPtr::ops_; +} + +inline std::exception* exception_wrapper::get_exception() noexcept { + return const_cast(vptr_->get_exception_(this)); +} +inline std::exception const* exception_wrapper::get_exception() const noexcept { + return vptr_->get_exception_(this); +} + +template +inline Ex* exception_wrapper::get_exception() noexcept { + Ex* object{nullptr}; + with_exception([&](Ex& ex) { object = &ex; }); + return object; +} + 
+template +inline Ex const* exception_wrapper::get_exception() const noexcept { + Ex const* object{nullptr}; + with_exception([&](Ex const& ex) { object = &ex; }); + return object; +} + +inline std::exception_ptr const& +exception_wrapper::to_exception_ptr() noexcept { + // Computing an exception_ptr is expensive so cache the result. + return (*this = vptr_->get_exception_ptr_(this)).eptr_.ptr_; +} +inline std::exception_ptr exception_wrapper::to_exception_ptr() const noexcept { + return vptr_->get_exception_ptr_(this).eptr_.ptr_; +} + +inline std::type_info const& exception_wrapper::none() noexcept { + return typeid(void); +} +inline std::type_info const& exception_wrapper::unknown() noexcept { + return typeid(Unknown); +} + +inline std::type_info const& exception_wrapper::type() const noexcept { + return *vptr_->type_(this); +} + +inline folly::fbstring exception_wrapper::what() const { + if (auto e = get_exception()) { + return class_name() + ": " + e->what(); + } + return class_name(); +} + +inline folly::fbstring exception_wrapper::class_name() const { + auto& ti = type(); + return ti == none() + ? "" + : ti == unknown() ? "" : folly::demangle(ti); +} + +template +inline bool exception_wrapper::is_compatible_with() const noexcept { + return with_exception([](Ex const&) {}); +} + +[[noreturn]] inline void exception_wrapper::throw_exception() const { + vptr_->throw_(this); + onNoExceptionError(__func__); +} + +template +[[noreturn]] inline void exception_wrapper::throw_with_nested(Ex&& ex) const { + try { + throw_exception(); + } catch (...) { + std::throw_with_nested(std::forward(ex)); + } +} + +template +struct exception_wrapper::ExceptionTypeOf { + using type = arg_type>; + static_assert( + std::is_reference::value, + "Always catch exceptions by reference."); + static_assert( + !IsConst || std::is_const>::value, + "handle() or with_exception() called on a const exception_wrapper " + "and asked to catch a non-const exception. Handler will never fire. 
" + "Catch exception by const reference to fix this."); +}; + +// Nests a throw in the proper try/catch blocks +template +struct exception_wrapper::HandleReduce { + bool* handled_; + + template < + class ThrowFn, + class CatchFn, + FOLLY_REQUIRES(!IsCatchAll::value)> + auto operator()(ThrowFn&& th, CatchFn& ca) const { + using Ex = _t>; + return [th = std::forward(th), &ca, handled_ = handled_] { + try { + th(); + } catch (Ex& e) { + // If we got here because a catch function threw, rethrow. + if (*handled_) { + throw; + } + *handled_ = true; + ca(e); + } + }; + } + + template < + class ThrowFn, + class CatchFn, + FOLLY_REQUIRES(IsCatchAll::value)> + auto operator()(ThrowFn&& th, CatchFn& ca) const { + return [th = std::forward(th), &ca, handled_ = handled_] { + try { + th(); + } catch (...) { + // If we got here because a catch function threw, rethrow. + if (*handled_) { + throw; + } + *handled_ = true; + ca(); + } + }; + } +}; + +// When all the handlers expect types derived from std::exception, we can +// sometimes invoke the handlers without throwing any exceptions. +template +struct exception_wrapper::HandleStdExceptReduce { + using StdEx = AddConstIf; + + template < + class ThrowFn, + class CatchFn, + FOLLY_REQUIRES(!IsCatchAll::value)> + auto operator()(ThrowFn&& th, CatchFn& ca) const { + using Ex = _t>; + return + [th = std::forward(th), &ca](auto&& continuation) -> StdEx* { + if (auto e = const_cast(th(continuation))) { + if (auto e2 = dynamic_cast>(e)) { + ca(*e2); + } else { + return e; + } + } + return nullptr; + }; + } + + template < + class ThrowFn, + class CatchFn, + FOLLY_REQUIRES(IsCatchAll::value)> + auto operator()(ThrowFn&& th, CatchFn& ca) const { + return [th = std::forward(th), &ca](auto &&) -> StdEx* { + // The following continuation causes ca() to execute if *this contains + // an exception /not/ derived from std::exception. + auto continuation = [&ca](StdEx* e) { + return e != nullptr ? 
e : ((void)ca(), nullptr); + }; + if (th(continuation) != nullptr) { + ca(); + } + return nullptr; + }; + } +}; + +// Called when some types in the catch clauses are not derived from +// std::exception. +template +inline void +exception_wrapper::handle_(std::false_type, This& this_, CatchFns&... fns) { + bool handled = false; + auto impl = exception_wrapper_detail::fold( + HandleReduce::value>{&handled}, + [&] { this_.throw_exception(); }, + fns...); + impl(); +} + +// Called when all types in the catch clauses are either derived from +// std::exception or a catch-all clause. +template +inline void +exception_wrapper::handle_(std::true_type, This& this_, CatchFns&... fns) { + using StdEx = exception_wrapper_detail:: + AddConstIf::value, std::exception>; + auto impl = exception_wrapper_detail::fold( + HandleStdExceptReduce::value>{}, + [&](auto&& continuation) { + return continuation( + const_cast(this_.vptr_->get_exception_(&this_))); + }, + fns...); + // This continuation gets evaluated if CatchFns... does not include a + // catch-all handler. It is a no-op. + auto continuation = [](StdEx* ex) { return ex; }; + if (nullptr != impl(continuation)) { + this_.throw_exception(); + } +} + +namespace exception_wrapper_detail { +template +struct catch_fn { + Fn fn_; + auto operator()(Ex& ex) { + return fn_(ex); + } +}; + +template +inline catch_fn catch_(Ex*, Fn fn) { + return {std::move(fn)}; +} +template +inline Fn catch_(void const*, Fn fn) { + return fn; +} +} // namespace exception_wrapper_detail + +template +inline bool exception_wrapper::with_exception_(This& this_, Fn fn_) { + if (!this_) { + return false; + } + bool handled = true; + auto fn = exception_wrapper_detail::catch_( + static_cast(nullptr), std::move(fn_)); + auto&& all = [&](...) 
{ handled = false; }; + handle_(IsStdException>{}, this_, fn, all); + return handled; +} + +template +inline bool exception_wrapper::with_exception(Fn fn) { + return with_exception_(*this, std::move(fn)); +} +template +inline bool exception_wrapper::with_exception(Fn fn) const { + return with_exception_(*this, std::move(fn)); +} + +template +inline void exception_wrapper::handle(CatchFns... fns) { + using AllStdEx = + exception_wrapper_detail::AllOf...>; + if (!*this) { + onNoExceptionError(__func__); + } + this->handle_(AllStdEx{}, *this, fns...); +} +template +inline void exception_wrapper::handle(CatchFns... fns) const { + using AllStdEx = + exception_wrapper_detail::AllOf...>; + if (!*this) { + onNoExceptionError(__func__); + } + this->handle_(AllStdEx{}, *this, fns...); +} + +} // namespace folly diff --git a/native/iosTest/Pods/Folly/folly/ExceptionWrapper.h b/native/iosTest/Pods/Folly/folly/ExceptionWrapper.h new file mode 100644 index 000000000..8fb287b7d --- /dev/null +++ b/native/iosTest/Pods/Folly/folly/ExceptionWrapper.h @@ -0,0 +1,714 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Author: Eric Niebler + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wpotentially-evaluated-expression" +// GCC gets confused about lambda scopes and issues shadow-local warnings for +// parameters in totally different functions. +FOLLY_GCC_DISABLE_NEW_SHADOW_WARNINGS +#endif + +#define FOLLY_EXCEPTION_WRAPPER_H_INCLUDED + +namespace folly { + +#define FOLLY_REQUIRES_DEF(...) \ + std::enable_if_t(__VA_ARGS__), long> + +#define FOLLY_REQUIRES(...) FOLLY_REQUIRES_DEF(__VA_ARGS__) = __LINE__ + +namespace exception_wrapper_detail { + +template