diff --git a/.circleci/config.yml b/.circleci/config.yml index f2eab6807d7..621ac49ffeb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,8 +7,6 @@ jobs: # preinstalled to reduce installation time. docker: - image: fbopensource/zstd-circleci-primary:0.0.1 - # TODO: Re-enable aarch64 build: - # make aarch64build && make clean steps: - checkout - run: @@ -36,6 +34,7 @@ jobs: make ppc64build V=1; make clean make ppcbuild V=1; make clean make armbuild V=1; make clean + make aarch64build V=1; make clean make -C tests test-legacy test-longmatch; make clean make -C lib libzstd-nomt; make clean # This step should only be run in a cron job diff --git a/.circleci/images/primary/Dockerfile b/.circleci/images/primary/Dockerfile index dd800415252..bfcb6af6917 100644 --- a/.circleci/images/primary/Dockerfile +++ b/.circleci/images/primary/Dockerfile @@ -1,4 +1,4 @@ -FROM circleci/buildpack-deps:bionic +FROM circleci/buildpack-deps:focal RUN sudo dpkg --add-architecture i386 RUN sudo apt-get -y -qq update diff --git a/.cirrus.yml b/.cirrus.yml index fe17aacea5d..27ca65e8d29 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -2,9 +2,8 @@ task: name: FreeBSD (shortest) freebsd_instance: matrix: + image_family: freebsd-13-0 image_family: freebsd-12-2 - # The stable 11.3 image causes "Agent is not responding" so use a snapshot - image_family: freebsd-11-3-snap install_script: pkg install -y gmake coreutils script: | MOREFLAGS="-Werror" gmake -j all diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index bacdac27b06..755a46af145 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -26,8 +26,8 @@ If applicable, add screenshots and charts to help explain your problem. **Desktop (please complete the following information):** - OS: [e.g. Mac] - Version [e.g. 22] - - Compiler [e.g gcc] - - Flags [e.g O2] + - Compiler [e.g. gcc] + - Flags [e.g. O2] - Other relevant hardware specs [e.g. Dual-core] - Build system [e.g. 
Makefile] diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000000..5ace4600a1f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/workflows/dev-long-tests.yml b/.github/workflows/dev-long-tests.yml index 7a94fb66654..1c8c9ec555f 100644 --- a/.github/workflows/dev-long-tests.yml +++ b/.github/workflows/dev-long-tests.yml @@ -13,7 +13,7 @@ jobs: make-all: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: make all run: make all @@ -24,7 +24,7 @@ jobs: DEVNULLRIGHTS: 1 READFROMBLOCKDEVICE: 1 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: make test run: make test @@ -32,49 +32,77 @@ jobs: make-test-osx: runs-on: macos-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: OS-X test run: make test # make -c lib all doesn't work because of the fact that it's not a tty no-intrinsics-fuzztest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: no intrinsics fuzztest run: MOREFLAGS="-DZSTD_NO_INTRINSICS" make -C tests fuzztest tsan-zstreamtest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: thread sanitizer zstreamtest run: CC=clang ZSTREAM_TESTTIME=-T3mn make tsan-test-zstream + ubsan-zstreamtest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: undefined behavior sanitizer zstreamtest + run: CC=clang make uasan-test-zstream + # lasts ~15mn tsan-fuzztest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: thread sanitizer fuzztest run: CC=clang make tsan-fuzztest + + big-tests-zstreamtest32: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: zstream tests in 32bit mode, with big tests + run: | + sudo apt-get -qqq update + make libc6install + CC=clang make -C tests test-zstream32 FUZZER_FLAGS="--big-tests" + # lasts ~23mn gcc-8-asan-ubsan-testzstd: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: gcc-8 + ASan + UBSan + Test Zstd + # See https://askubuntu.com/a/1428822 run: | + echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal main universe" | sudo tee -a /etc/apt/sources.list sudo apt-get -qqq update make gcc8install CC=gcc-8 make -j uasan-test-zstd - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v142 + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=${{matrix.toolset}} /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} - visual-2015: - # only GH actions windows-2016 contains VS 2015 - runs-on: windows-2016 - strategy: - matrix: - platform: [x64, Win32] - configuration: [Debug, Release] + # This tests that we don't accidently grow the size too much. + # If the size grows intentionally, you can raise these numbers. 
+ # But we do need to think about binary size, since it is a concern. + libzstd-size: + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Add MSBuild to PATH - uses: microsoft/setup-msbuild@v1.0.2 - - name: Build - working-directory: ${{env.GITHUB_WORKSPACE}} - run: > - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 - /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: libzstd size test + run: | + make clean && make -j -C lib libzstd && ./tests/check_size.py lib/libzstd.so 1100000 + make clean && make -j -C lib libzstd ZSTD_LIB_COMPRESSION=0 ZSTD_LIB_DICTBUILDER=0 && ./tests/check_size.py lib/libzstd.so 400000 + make clean && make -j -C lib libzstd ZSTD_LIB_MINIFY=1 && ./tests/check_size.py lib/libzstd.so 300000 + make clean && make -j -C lib libzstd ZSTD_LIB_MINIFY=1 ZSTD_LIB_COMPRESSION=0 ZSTD_LIB_DICTBUILDER=0 && ./tests/check_size.py lib/libzstd.so 80000 minimal-decompressor-macros: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: minimal decompressor macros run: | make clean && make -j all ZSTD_LIB_MINIFY=1 MOREFLAGS="-Werror" @@ -249,7 +310,7 @@ jobs: dynamic-bmi2: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: dynamic bmi2 tests run: | make clean && make -j check MOREFLAGS="-O0 -Werror -mbmi2" @@ -261,7 +322,7 @@ jobs: test-variants: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: make all variants & validate run: | make -j -C programs allVariants MOREFLAGS=-O0 @@ -270,7 +331,7 @@ jobs: qemu-consistency: name: QEMU ${{ matrix.name }} - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. matrix: @@ -287,7 +348,7 @@ jobs: XCC: ${{ matrix.xcc }} XEMU: ${{ matrix.xemu }} steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: apt update & install run: | sudo apt-get update @@ -332,39 +393,45 @@ jobs: mingw-short-test: runs-on: windows-latest strategy: - fail-fast: false + fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. 
matrix: include: [ - { compiler: gcc, platform: x64, script: "CFLAGS=-Werror make -j allzstd DEBUGLEVEL=2"}, - { compiler: gcc, platform: x86, script: "CFLAGS=-Werror make -j allzstd"}, - { compiler: clang, platform: x64, script: "CFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make -j allzstd V=1"}, + { compiler: gcc, msystem: MINGW32, cflags: "-Werror"}, + { compiler: gcc, msystem: MINGW64, cflags: "-Werror"}, + { compiler: clang, msystem: MINGW64, cflags: "--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion -Wno-unused-command-line-argument"}, ] + defaults: + run: + shell: msys2 {0} steps: - - uses: actions/checkout@v2 - - name: Mingw short test - run: | - ECHO "Building ${{matrix.compiler}} ${{matrix.platform}}" - $env:PATH_ORIGINAL = $env:PATH - $env:PATH_MINGW32 = "C:\msys64\mingw32\bin" - $env:PATH_MINGW64 = "C:\msys64\mingw64\bin" - COPY C:\msys64\usr\bin\make.exe C:\msys64\mingw32\bin\make.exe - COPY C:\msys64\usr\bin\make.exe C:\msys64\mingw64\bin\make.exe - IF ("${{matrix.platform}}" -eq "x64") - { - $env:PATH = $env:PATH_MINGW64 + ";" + $env:PATH_ORIGINAL - } - ELSEIF ("${{matrix.platform}}" -eq "x86") - { - $env:PATH = $env:PATH_MINGW32 + ";" + $env:PATH_ORIGINAL - } + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - uses: msys2/setup-msys2@v2 + with: + msystem: ${{ matrix.msystem }} + install: make diffutils + update: true + # Based on https://ariya.io/2020/07/on-github-actions-with-msys2 + - name: install mingw gcc i686 + if: ${{ (matrix.msystem == 'MINGW32') && (matrix.compiler == 'gcc') }} + run: pacman --noconfirm -S mingw-w64-i686-gcc + - name: install mingw gcc x86_64 + if: ${{ (matrix.msystem == 'MINGW64') && (matrix.compiler == 'gcc') }} + run: pacman --noconfirm -S mingw-w64-x86_64-gcc + - name: install mingw clang i686 + if: ${{ (matrix.msystem == 'MINGW32') && (matrix.compiler == 'clang') }} + run: pacman --noconfirm -S mingw-w64-i686-clang + - name: install mingw clang x86_64 + if: ${{ (matrix.msystem == 'MINGW64') && (matrix.compiler == 'clang') }} + run: pacman --noconfirm -S mingw-w64-x86_64-clang + - name: run mingw tests + run: | make -v - sh -c "${{matrix.compiler}} -v" - $env:CC = "${{matrix.compiler}}" - sh -c "${{matrix.script}}" - ECHO "Testing ${{matrix.compiler}} ${{matrix.platform}}" + export CC=${{ matrix.compiler }} + $CC --version + CFLAGS="${{ matrix.cflags }}" make -j allzstd + echo "Testing $CC ${{ matrix.msystem }}" make clean - make check - + MSYS="" make check visual-runtime-tests: runs-on: windows-latest @@ -373,9 +440,9 @@ jobs: platform: [x64, Win32] configuration: [Release] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: Add MSBuild to PATH - uses: microsoft/setup-msbuild@v1.0.2 + uses: microsoft/setup-msbuild@v1.3 - name: Build and run tests working-directory: ${{env.GITHUB_WORKSPACE}} env: @@ -387,20 +454,109 @@ jobs: COPY build\VS2010\bin\${{matrix.platform}}_${{matrix.configuration}}\*.exe tests\ CD tests sh -e playTests.sh - DIR .\fuzzer.exe -T2m -# This test currently fails on Github Actions specifically. -# Possible reason : TTY emulation. -# Note that the same test works fine locally and on travisCI. -# This will have to be fixed before transferring the test to GA. 
-# versions-compatibility: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v2 -# - name: Versions Compatibility Test -# run: | -# make -C tests versionsTest + # Following instructions at: https://github.com/marketplace/actions/install-cygwin-action + cygwin-tests: + runs-on: windows-latest + steps: + - run: git config --global core.autocrlf input + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - uses: cygwin/cygwin-install-action@f5e0f048310c425e84bc789f493a828c6dc80a25 # tag=master + with: + platform: x86_64 + packages: >- + autoconf, + automake, + gcc-g++, + make, + mingw64-x86_64-gcc-g++, + patch, + perl + - name: cygwin tests + shell: C:\cygwin\bin\bash.exe --noprofile --norc -eo pipefail '{0}' + run: >- + export PATH=/usr/bin:$(cygpath ${SYSTEMROOT})/system32 && + export CFLAGS="-Werror -O1" && + ls && + make -j allzstd && + make -C tests fuzzer && + ./tests/fuzzer.exe -v -T1m + + intel-cet-compatibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: Build Zstd + run: | + make -j zstd V=1 + readelf -n zstd + - name: Get Intel SDE + run: | + curl -LO https://downloadmirror.intel.com/684899/sde-external-9.0.0-2021-11-07-lin.tar.xz + tar xJvf sde-external-9.0.0-2021-11-07-lin.tar.xz + - name: Configure Permissions + run: | + echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope + - name: Run Under SDE + run: | + sde-external-9.0.0-2021-11-07-lin/sde -cet -cet-raise 0 -cet-endbr-exe -cet-stderr -cet-abort -- ./zstd -b3 + + pkg-config: + runs-on: ubuntu-latest + container: + image: debian:testing + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: Install dependencies + run: | + apt -y update + apt -y install --no-install-recommends gcc libc6-dev make pkg-config + - name: Build and install + run: make -C lib install + - name: Test pkg-config + run: | + cc -Wall -Wextra -Wpedantic -Werror -o simple examples/simple_compression.c $(pkg-config --cflags --libs libzstd) + ./simple LICENSE + + versions-compatibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: Versions Compatibility Test + run: | + make -C tests versionsTest + + clangbuild: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: make clangbuild + run: | + make clangbuild + + clang-pgo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: Build PGO Zstd with Clang + env: + CC: clang-14 + LLVM_PROFDATA: llvm-profdata-14 + run: | + make -C programs zstd-pgo + ./programs/zstd -b + + gcc-pgo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + - name: Build PGO Zstd with GCC + env: + CC: gcc + run: | + make -C programs zstd-pgo + ./programs/zstd -b # For reference : icc tests @@ -421,7 +577,7 @@ jobs: # sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main" # sudo apt-get update # sudo apt-get install -y intel-basekit intel-hpckit -# - uses: actions/checkout@v2 +# - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 # - name: make check # run: | # make CC=/opt/intel/oneapi/compiler/latest/linux/bin/intel64/icc check diff --git a/.github/workflows/publish-release-artifacts.yml b/.github/workflows/publish-release-artifacts.yml index 952cb26b7e9..2c89a91a55a 100644 --- 
a/.github/workflows/publish-release-artifacts.yml +++ b/.github/workflows/publish-release-artifacts.yml @@ -3,16 +3,23 @@ name: publish-release-artifacts on: release: types: - - created + - published + +permissions: + contents: read jobs: publish-release-artifacts: + permissions: + contents: read # to fetch code (actions/checkout) + actions: write # to attach binaries to release artifacts (skx/github-action-publish-binaries) + runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags/') steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 - name: Archive env: @@ -61,7 +68,7 @@ jobs: fi - name: Publish - uses: skx/github-action-publish-binaries@release-1.3 + uses: skx/github-action-publish-binaries@release-2.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml new file mode 100644 index 00000000000..6a9e5217e57 --- /dev/null +++ b/.github/workflows/scorecards.yml @@ -0,0 +1,64 @@ +name: Scorecards supply-chain security +on: + # Only the default branch is supported. + branch_protection_rule: + schedule: + - cron: '22 21 * * 2' + push: + # TODO: Add release branch when supported? + branches: [ "dev" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecards analysis + if: github.repository == 'facebook/zstd' + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Used to receive a badge. + id-token: write + # Needs for private repositories. + contents: read + actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # tag=v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }} + + # Publish the results for public repositories to enable scorecard badges. For more details, see + # https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories, `publish_results` will automatically be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # tag=v3.1.2 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. 
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@3ebbd71c74ef574dbc558c82f70e52732c8b44fe # tag=v2.2.1 + with: + sarif_file: results.sarif diff --git a/.travis.yml b/.travis.yml index 6a1295b4cee..b96bf8ba217 100644 --- a/.travis.yml +++ b/.travis.yml @@ -92,8 +92,8 @@ matrix: - make -C tests versionsTest # meson dedicated test - - name: Xenial (Meson + clang) # ~15mn - dist: bionic + - name: Focal (Meson + clang) # ~15mn + dist: focal language: cpp compiler: clang install: @@ -102,7 +102,7 @@ matrix: travis_retry curl -o ~/ninja.zip -L 'https://github.com/ninja-build/ninja/releases/download/v1.9.0/ninja-linux.zip' && unzip ~/ninja.zip -d ~/.local/bin - | - travis_retry curl -o ~/get-pip.py -L 'https://bootstrap.pypa.io/get-pip.py' && + travis_retry curl -o ~/get-pip.py -L 'https://bootstrap.pypa.io/pip/3.6/get-pip.py' && python3 ~/get-pip.py --user && pip3 install --user meson script: diff --git a/CHANGELOG b/CHANGELOG index 4e0045b950c..4010c1ff5d3 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,63 @@ +v1.5.4 (Feb 2023) +perf: +20% faster huffman decompression for targets that can't compile x64 assembly (#3449, @terrelln) +perf: up to +10% faster streaming compression at levels 1-2 (#3114, @embg) +perf: +4-13% for levels 5-12 by optimizing function generation (#3295, @terrelln) +pref: +3-11% compression speed for `arm` target (#3199, #3164, #3145, #3141, #3138, @JunHe77 and #3139, #3160, @danlark1) +perf: +5-30% faster dictionary compression at levels 1-4 (#3086, #3114, #3152, @embg) +perf: +10-20% cold dict compression speed by prefetching CDict tables (#3177, @embg) +perf: +1% faster compression by removing a branch in ZSTD_fast_noDict (#3129, @felixhandte) +perf: Small compression ratio improvements in high compression mode (#2983, #3391, @Cyan4973 and #3285, #3302, @daniellerozenblit) +perf: small speed improvement by better detecting `STATIC_BMI2` for `clang` (#3080, @TocarIP) +perf: Improved streaming performance when `ZSTD_c_stableInBuffer` is set (#2974, @Cyan4973) +cli: Asynchronous I/O for improved cli speed (#2975, #2985, #3021, #3022, @yoniko) +cli: Change `zstdless` behavior to align with `zless` (#2909, @binhdvo) +cli: Keep original file if `-c` or `--stdout` is given (#3052, @dirkmueller) +cli: Keep original files when result is concatenated into a single output with `-o` (#3450, @Cyan4973) +cli: Preserve Permissions and Ownership of regular files (#3432, @felixhandte) +cli: Print zlib/lz4/lzma library versions with `-vv` (#3030, @terrelln) +cli: Print checksum value for single frame files with `-lv` (#3332, @Cyan4973) +cli: Print `dictID` when present with `-lv` (#3184, @htnhan) +cli: when `stderr` is *not* the console, disable status updates, but preserve final summary (#3458, @Cyan4973) +cli: support `--best` and `--no-name` in `gzip` compatibility mode (#3059, @dirkmueller) +cli: support for `posix` high resolution timer `clock_gettime()`, for improved benchmark accuracy (#3423, @Cyan4973) +cli: improved help/usage (`-h`, `-H`) formatting (#3094, @dirkmueller and #3385, @jonpalmisc) +cli: Fix better handling of bogus numeric values (#3268, @ctkhanhly) +cli: Fix input consists of multiple files _and_ `stdin` (#3222, @yoniko) +cli: Fix tiny files passthrough (#3215, @cgbur) +cli: Fix for `-r` on empty directory (#3027, @brailovich) +cli: Fix empty string as argument for `--output-dir-*` (#3220, @embg) +cli: Fix decompression memory usage reported by `-vv --long` (#3042, @u1f35c, and #3232, @zengyijing) +cli: Fix infinite loop when empty 
input is passed to trainer (#3081, @terrelln) +cli: Fix `--adapt` doesn't work when `--no-progress` is also set (#3354, @terrelln) +api: Support for Block-Level Sequence Producer (#3333, @embg) +api: Support for in-place decompression (#3432, @terrelln) +api: New `ZSTD_CCtx_setCParams()` function, set all parameters defined in a `ZSTD_compressionParameters` structure (#3403, @Cyan4973) +api: Streaming decompression detects incorrect header ID sooner (#3175, @Cyan4973) +api: Window size resizing optimization for edge case (#3345, @daniellerozenblit) +api: More accurate error codes for busy-loop scenarios (#3413, #3455, @Cyan4973) +api: Fix limit overflow in `compressBound` and `decompressBound` (#3362, #3373, Cyan4973) reported by @nigeltao +api: Deprecate several advanced experimental functions: streaming (#3408, @embg), copy (#3196, @mileshu) +bug: Fix corruption that rarely occurs in 32-bit mode with wlog=25 (#3361, @terrelln) +bug: Fix for block-splitter (#3033, @Cyan4973) +bug: Fixes for Sequence Compression API (#3023, #3040, @Cyan4973) +bug: Fix leaking thread handles on Windows (#3147, @animalize) +bug: Fix timing issues with cmake/meson builds (#3166, #3167, #3170, @Cyan4973) +build: Allow user to select legacy level for cmake (#3050, @shadchin) +build: Enable legacy support by default in cmake (#3079, @niamster) +build: Meson build script improvements (#3039, #3120, #3122, #3327, #3357, @eli-schwartz and #3276, @neheb) +build: Add aarch64 to supported architectures for zstd_trace (#3054, @ooosssososos) +build: support AIX architecture (#3219, @qiongsiwu) +build: Fix `ZSTD_LIB_MINIFY` build macro, which now reduces static library size by half (#3366, @terrelln) +build: Fix Windows issues with Multithreading translation layer (#3364, #3380, @yoniko) and ARM64 target (#3320, @cwoffenden) +build: Fix `cmake` script (#3382, #3392, @terrelln and #3252 @Tachi107 and #3167 @Cyan4973) +doc: Updated man page, providing more details for `--train` mode (#3112, @Cyan4973) +doc: Add decompressor errata document (#3092, @terrelln) +misc: Enable Intel CET (#2992, #2994, @hjl-tools) +misc: Fix `contrib/` seekable format (#3058, @yhoogstrate and #3346, @daniellerozenblit) +misc: Improve speed of the one-file library generator (#3241, @wahern and #3005, @cwoffenden) + +v1.5.3 (dev version, unpublished) + v1.5.2 (Jan, 2022) perf: Regain Minimal memset()-ing During Reuse of Compression Contexts (@Cyan4973, #2969) build: Build Zstd with `noexecstack` on All Architectures (@felixhandte, #2964) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e7e545129e5..f5e747ae1b9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,7 +7,7 @@ New versions are being developed in the "dev" branch, or in their own feature branch. When they are deemed ready for a release, they are merged into "release". -As a consequences, all contributions must stage first through "dev" +As a consequence, all contributions must stage first through "dev" or their own feature branch. ## Pull Requests @@ -134,11 +134,11 @@ It can be useful to look at additional static analyzers once in a while (and we - Static analyzers are full of false positive. The signal to noise ratio is actually pretty low. - A good CI policy is "zero-warning tolerance". That means that all issues must be solved, including false positives. This quickly becomes a tedious workload. 
- Multiple static analyzers will feature multiple kind of false positives, sometimes applying to the same code but in different ways leading to : - + torteous code, trying to please multiple constraints, hurting readability and therefore maintenance. Sometimes, such complexity introduce other more subtle bugs, that are just out of scope of the analyzers. + + tortuous code, trying to please multiple constraints, hurting readability and therefore maintenance. Sometimes, such complexity introduce other more subtle bugs, that are just out of scope of the analyzers. + sometimes, these constraints are mutually exclusive : if one try to solve one, the other static analyzer will complain, they can't be both happy at the same time. - As if that was not enough, the list of false positives change with each version. It's hard enough to follow one static analyzer, but multiple ones with their own update agenda, this quickly becomes a massive velocity reducer. -This is different from running a static analyzer once in a while, looking at the output, and __cherry picking__ a few warnings that seem helpful, either because they detected a genuine risk of bug, or because it helps expressing the code in a way which is more readable or more difficult to misuse. These kind of reports can be useful, and are accepted. +This is different from running a static analyzer once in a while, looking at the output, and __cherry picking__ a few warnings that seem helpful, either because they detected a genuine risk of bug, or because it helps expressing the code in a way which is more readable or more difficult to misuse. These kinds of reports can be useful, and are accepted. ## Continuous Integration CI tests run every time a pull request (PR) is created or updated. The exact tests @@ -197,7 +197,7 @@ something subtle merged is extensive benchmarking. You will be doing us a great take the time to run extensive, long-duration, and potentially cross-(os, platform, process, etc) benchmarks on your end before submitting a PR. Of course, you will not be able to benchmark your changes on every single processor and os out there (and neither will we) but do that best -you can:) We've adding some things to think about when benchmarking below in the Benchmarking +you can:) We've added some things to think about when benchmarking below in the Benchmarking Performance section which might be helpful for you. 3. Optimizing performance for a certain OS, processor vendor, compiler, or network system is a perfectly legitimate thing to do as long as it does not harm the overall performance health of Zstd. @@ -273,7 +273,7 @@ for that options you have just provided. If you want to look at the internals of benchmarking script works, you can check out programs/benchzstd.c For example: say you have made a change that you believe improves the speed of zstd level 1. The -very first thing you should use to asses whether you actually achieved any sort of improvement +very first thing you should use to assess whether you actually achieved any sort of improvement is `zstd -b`. You might try to do something like this. Note: you can use the `-i` option to specify a running time for your benchmark in seconds (default is 3 seconds). Usually, the longer the running time, the more stable your results will be. @@ -299,7 +299,7 @@ this method of evaluation will not be sufficient. ### Profiling There are a number of great profilers out there. 
We're going to briefly mention how you can profile your code using `instruments` on mac, `perf` on linux and `visual studio profiler` -on windows. +on Windows. Say you have an idea for a change that you think will provide some good performance gains for level 1 compression on Zstd. Typically this means, you have identified a section of @@ -315,8 +315,8 @@ might be). Most profilers (including the profilers discussed below) will generate a call graph of functions for you. Your goal will be to find your function of interest in this call graph -and then inspect the time spent inside of it. You might also want to to look at the -annotated assembly which most profilers will provide you with. +and then inspect the time spent inside of it. You might also want to look at the annotated +assembly which most profilers will provide you with. #### Instruments We will once again consider the scenario where you think you've identified a piece of code @@ -330,7 +330,7 @@ Instruments. * You will want a benchmark that runs for at least a few seconds (5 seconds will usually be long enough). This way the profiler will have something to work with and you will have ample time to attach your profiler to this process:) - * I will just use benchzstd as my bencharmking script for this example: + * I will just use benchzstd as my benchmarmking script for this example: ``` $ zstd -b1 -i5 # this will run for 5 seconds ``` @@ -455,7 +455,7 @@ This design requirement is fundamental to preserve the portability of the code b Any variable that can be `const` (aka. read-only) **must** be `const`. Any pointer which content will not be modified must be `const`. This property is then controlled at compiler level. - `const` variables are an important signal to readers that this variable isn’t modified. + `const` variables are an important signal to readers that this variable isn't modified. Conversely, non-const variables are a signal to readers to watch out for modifications later on in the function. * If a function must be inlined, mention it explicitly, using project's own portable macros, such as `FORCE_INLINE_ATTR`, diff --git a/LICENSE b/LICENSE index a793a802892..75800288cc2 100644 --- a/LICENSE +++ b/LICENSE @@ -2,7 +2,7 @@ BSD License For Zstandard software -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -14,9 +14,9 @@ are permitted provided that the following conditions are met: this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. + * Neither the name Facebook, nor Meta, nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED diff --git a/Makefile b/Makefile index 9b5451d3d74..a7890a5b1d6 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-2021, Yann Collet, Facebook, Inc. 
+# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -123,6 +123,7 @@ contrib: lib $(MAKE) -C contrib/seekable_format/examples all $(MAKE) -C contrib/seekable_format/tests test $(MAKE) -C contrib/largeNbDicts all + $(MAKE) -C contrib/externalSequenceProducer all cd build/single_file_libs/ ; ./build_decoder_test.sh cd build/single_file_libs/ ; ./build_library_test.sh @@ -142,6 +143,7 @@ clean: $(Q)$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID) $(Q)$(MAKE) -C contrib/seekable_format/tests $@ > $(VOID) $(Q)$(MAKE) -C contrib/largeNbDicts $@ > $(VOID) + $(Q)$(MAKE) -C contrib/externalSequenceProducer $@ > $(VOID) $(Q)$(RM) zstd$(EXT) zstdmt$(EXT) tmp* $(Q)$(RM) -r lz4 @echo Cleaning completed @@ -157,7 +159,7 @@ MKDIR ?= mkdir -p HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) EGREP_OPTIONS += --color=never endif EGREP = egrep $(EGREP_OPTIONS) @@ -334,6 +336,8 @@ tsan-%: clean .PHONY: apt-install apt-install: + # TODO: uncomment once issue 3011 is resolved and remove hack from Github Actions .yml + # sudo apt-get update sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES) .PHONY: apt-add-repo @@ -400,7 +404,7 @@ cmakebuild: c89build: clean $(CC) -v - CFLAGS="-std=c89 -Werror -O0" $(MAKE) allmost # will fail, due to missing support for `long long` + CFLAGS="-std=c89 -Werror -Wno-attributes -Wpedantic -Wno-long-long -Wno-variadic-macros -O0" $(MAKE) lib zstd gnu90build: clean $(CC) -v diff --git a/README.md b/README.md index 69720ba2ccf..6bcf757d862 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Zstandard's format is stable and documented in [RFC8878](https://datatracker.iet This repository represents the reference implementation, provided as an open-source dual [BSD](LICENSE) and [GPLv2](COPYING) licensed **C** library, and a command line utility producing and decoding `.zst`, `.gz`, `.xz` and `.lz4` files. Should your project require another programming language, -a list of known ports and bindings is provided on [Zstandard homepage](http://www.zstd.net/#other-languages). +a list of known ports and bindings is provided on [Zstandard homepage](https://facebook.github.io/zstd/#other-languages). **Development branch status:** @@ -39,7 +39,7 @@ compiled with [gcc] 9.3.0, on the [Silesia compression corpus]. [lzbench]: https://github.com/inikep/lzbench -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia [gcc]: https://gcc.gnu.org/ | Compressor name | Ratio | Compression| Decompress.| @@ -56,8 +56,8 @@ on the [Silesia compression corpus]. | lzf 3.6 -1 | 2.077 | 410 MB/s | 830 MB/s | | snappy 1.1.9 | 2.073 | 550 MB/s | 1750 MB/s | -[zlib]: http://www.zlib.net/ -[lz4]: http://www.lz4.org/ +[zlib]: https://www.zlib.net/ +[lz4]: https://lz4.github.io/lz4/ The negative compression levels, specified with `--fast=#`, offer faster compression and decompression speed @@ -124,14 +124,27 @@ Dictionary gains are mostly effective in the first few KB. Then, the compression ## Build instructions +`make` is the officially maintained build system of this project. +All other build systems are "compatible" and 3rd-party maintained, +they may feature small differences in advanced options. 
+When your system allows it, prefer using `make` to build `zstd` and `libzstd`. + ### Makefile If your system is compatible with standard `make` (or `gmake`), invoking `make` in root directory will generate `zstd` cli in root directory. +It will also create `libzstd` into `lib/`. Other available options include: - `make install` : create and install zstd cli, library and man pages -- `make check` : create and run `zstd`, tests its behavior on local platform +- `make check` : create and run `zstd`, test its behavior on local platform + +The `Makefile` follows the [GNU Standard Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html), +allowing staged install, standard flags, directory variables and command variables. + +For advanced use cases, specialized compilation flags which control binary generation +are documented in [`lib/README.md`](lib/README.md#modular-build) for the `libzstd` library +and in [`programs/README.md`](programs/README.md#compilation-variables) for the `zstd` CLI. ### cmake @@ -178,13 +191,15 @@ The output binary will be in `buck-out/gen/programs/`. ## Testing -You can run quick local smoke tests by executing the `playTest.sh` script from the `src/tests` directory. -Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the zstd and datagen binary. -For information on CI testing, please refer to TESTING.md +You can run quick local smoke tests by running `make check`. +If you can't use `make`, execute the `playTest.sh` script from the `src/tests` directory. +Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the `zstd` and `datagen` binary. +For information on CI testing, please refer to `TESTING.md`. ## Status -Zstandard is currently deployed within Facebook. It is used continuously to compress large amounts of data in multiple formats and use cases. +Zstandard is currently deployed within Facebook and many other large cloud infrastructures. +It is run continuously to compress large amounts of data in multiple formats and use cases. Zstandard is considered safe for production environments. ## License diff --git a/TESTING.md b/TESTING.md index 32b133b67bf..df842cc9411 100644 --- a/TESTING.md +++ b/TESTING.md @@ -22,7 +22,7 @@ They consist of the following tests: - `tests/playTests.sh --test-large-data` - Fuzzer tests: `tests/fuzzer.c`, `tests/zstreamtest.c`, and `tests/decodecorpus.c` - `tests/zstreamtest.c` under Tsan (streaming mode, including multithreaded mode) -- Valgrind Test (`make -C tests valgrindTest`) (testing CLI and fuzzer under valgrind) +- Valgrind Test (`make -C tests test-valgrind`) (testing CLI and fuzzer under `valgrind`) - Fuzzer tests (see above) on ARM, AArch64, PowerPC, and PowerPC64 Long Tests diff --git a/build/VS2008/zstd/zstd.vcproj b/build/VS2008/zstd/zstd.vcproj index c7eec577db3..91f2bda536c 100644 --- a/build/VS2008/zstd/zstd.vcproj +++ b/build/VS2008/zstd/zstd.vcproj @@ -384,6 +384,10 @@ RelativePath="..\..\..\programs\fileio.c" > + + diff --git a/build/VS2010/libzstd-dll/libzstd-dll.rc b/build/VS2010/libzstd-dll/libzstd-dll.rc index ee9f5628043..13e8746ffb5 100644 --- a/build/VS2010/libzstd-dll/libzstd-dll.rc +++ b/build/VS2010/libzstd-dll/libzstd-dll.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." 
VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "libzstd.dll" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." VALUE "OriginalFilename", "libzstd.dll" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/build/VS2010/zstd/zstd.rc b/build/VS2010/zstd/zstd.rc index f5e404730d2..a2118c2df10 100644 --- a/build/VS2010/zstd/zstd.rc +++ b/build/VS2010/zstd/zstd.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "zstd.exe" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." VALUE "OriginalFilename", "zstd.exe" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/build/VS2010/zstd/zstd.vcxproj b/build/VS2010/zstd/zstd.vcxproj index 46e22f42e9b..8ab239dd814 100644 --- a/build/VS2010/zstd/zstd.vcxproj +++ b/build/VS2010/zstd/zstd.vcxproj @@ -62,6 +62,7 @@ + diff --git a/build/cmake/CMakeLists.txt b/build/cmake/CMakeLists.txt index 93a167cafb6..0bffc87d933 100644 --- a/build/cmake/CMakeLists.txt +++ b/build/cmake/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -8,18 +8,18 @@ # ################################################################ cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR) - -# As of 2018-12-26 ZSTD has been validated to build with cmake version 3.13.2 new policies. -# Set and use the newest cmake policies that are validated to work -set(ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION "3") + +# As of 2018-12-26 ZSTD has been validated to build with cmake version 3.13.2 new policies. 
+# Set and use the newest cmake policies that are validated to work +set(ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION "3") set(ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION "13") #Policies never changed at PATCH level if("${CMAKE_MAJOR_VERSION}" LESS 3) - set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") -elseif( "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}" EQUAL "${CMAKE_MAJOR_VERSION}" AND + set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") +elseif( "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}" EQUAL "${CMAKE_MAJOR_VERSION}" AND "${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}" GREATER "${CMAKE_MINOR_VERSION}") - set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") -else() - set(ZSTD_CMAKE_POLICY_VERSION "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}.${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}.0") + set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") +else() + set(ZSTD_CMAKE_POLICY_VERSION "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}.${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}.0") endif() cmake_policy(VERSION ${ZSTD_CMAKE_POLICY_VERSION}) @@ -40,16 +40,18 @@ if( CMAKE_MAJOR_VERSION LESS 3 ) set(PROJECT_VERSION_PATCH ${zstd_VERSION_PATCH}) set(PROJECT_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") enable_language(C) # Main library is in C + enable_language(ASM) # And ASM enable_language(CXX) # Testing contributed code also utilizes CXX else() project(zstd VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}" LANGUAGES C # Main library is in C + ASM # And ASM CXX # Testing contributed code also utilizes CXX ) endif() message(STATUS "ZSTD VERSION: ${zstd_VERSION}") -set(zstd_HOMEPAGE_URL "http://www.zstd.net") +set(zstd_HOMEPAGE_URL "https://facebook.github.io/zstd") set(zstd_DESCRIPTION "Zstandard is a real-time compression algorithm, providing high compression ratios.") # Set a default build type if none was specified @@ -82,11 +84,12 @@ message(STATUS "CMAKE_INSTALL_LIBDIR: ${CMAKE_INSTALL_LIBDIR}") #----------------------------------------------------------------------------- # Legacy support -option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" OFF) +option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" ON) if (ZSTD_LEGACY_SUPPORT) message(STATUS "ZSTD_LEGACY_SUPPORT defined!") - add_definitions(-DZSTD_LEGACY_SUPPORT=5) + set(ZSTD_LEGACY_LEVEL 5 CACHE STRING "") + add_definitions(-DZSTD_LEGACY_SUPPORT=${ZSTD_LEGACY_LEVEL}) else () message(STATUS "ZSTD_LEGACY_SUPPORT not defined!") add_definitions(-DZSTD_LEGACY_SUPPORT=0) diff --git a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake index e23b9d603eb..0265349fbfd 100644 --- a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake +++ b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake @@ -1,7 +1,8 @@ include(CheckCXXCompilerFlag) include(CheckCCompilerFlag) +include(CheckLinkerFlag) -function(EnableCompilerFlag _flag _C _CXX) +function(EnableCompilerFlag _flag _C _CXX _LD) string(REGEX REPLACE "\\+" "PLUS" varname "${_flag}") string(REGEX REPLACE "[^A-Za-z0-9]+" "_" varname "${varname}") string(REGEX REPLACE "^_+" "" varname "${varname}") @@ -18,43 +19,58 @@ function(EnableCompilerFlag _flag _C _CXX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_flag}" PARENT_SCOPE) endif () endif () + if (_LD) + CHECK_LINKER_FLAG(C ${_flag} LD_FLAG_${varname}) + if (LD_FLAG_${varname}) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_flag}" PARENT_SCOPE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${_flag}" PARENT_SCOPE) + endif () + endif () endfunction() 
macro(ADD_ZSTD_COMPILATION_FLAGS) if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang" OR MINGW) #Not only UNIX but also WIN32 for MinGW - #Set c++11 by default - EnableCompilerFlag("-std=c++11" false true) - #Set c99 by default - EnableCompilerFlag("-std=c99" true false) + # It's possible to select the exact standard used for compilation. + # It's not necessary, but can be employed for specific purposes. + # Note that zstd source code is compatible with both C++98 and above + # and C-gnu90 (c90 + long long + variadic macros ) and above + # EnableCompilerFlag("-std=c++11" false true) # Set C++ compilation to c++11 standard + # EnableCompilerFlag("-std=c99" true false) # Set C compiation to c99 standard if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND MSVC) # clang-cl normally maps -Wall to -Weverything. - EnableCompilerFlag("/clang:-Wall" true true) + EnableCompilerFlag("/clang:-Wall" true true false) else () - EnableCompilerFlag("-Wall" true true) + EnableCompilerFlag("-Wall" true true false) endif () - EnableCompilerFlag("-Wextra" true true) - EnableCompilerFlag("-Wundef" true true) - EnableCompilerFlag("-Wshadow" true true) - EnableCompilerFlag("-Wcast-align" true true) - EnableCompilerFlag("-Wcast-qual" true true) - EnableCompilerFlag("-Wstrict-prototypes" true false) + EnableCompilerFlag("-Wextra" true true false) + EnableCompilerFlag("-Wundef" true true false) + EnableCompilerFlag("-Wshadow" true true false) + EnableCompilerFlag("-Wcast-align" true true false) + EnableCompilerFlag("-Wcast-qual" true true false) + EnableCompilerFlag("-Wstrict-prototypes" true false false) # Enable asserts in Debug mode if (CMAKE_BUILD_TYPE MATCHES "Debug") - EnableCompilerFlag("-DDEBUGLEVEL=1" true true) + EnableCompilerFlag("-DDEBUGLEVEL=1" true true false) endif () + # Add noexecstack flags + # LDFLAGS + EnableCompilerFlag("-z noexecstack" false false true) + # CFLAGS & CXXFLAGS + EnableCompilerFlag("-Qunused-arguments" true true false) + EnableCompilerFlag("-Wa,--noexecstack" true true false) elseif (MSVC) # Add specific compilation flags for Windows Visual set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate multi-threaded compilation (/MP flag)") if (CMAKE_GENERATOR MATCHES "Visual Studio" AND ACTIVATE_MULTITHREADED_COMPILATION) - EnableCompilerFlag("/MP" true true) + EnableCompilerFlag("/MP" true true false) endif () - + # UNICODE SUPPORT - EnableCompilerFlag("/D_UNICODE" true true) - EnableCompilerFlag("/DUNICODE" true true) + EnableCompilerFlag("/D_UNICODE" true true false) + EnableCompilerFlag("/DUNICODE" true true false) # Enable asserts in Debug mode if (CMAKE_BUILD_TYPE MATCHES "Debug") - EnableCompilerFlag("/DDEBUGLEVEL=1" true true) + EnableCompilerFlag("/DDEBUGLEVEL=1" true true false) endif () endif () diff --git a/build/cmake/CMakeModules/JoinPaths.cmake b/build/cmake/CMakeModules/JoinPaths.cmake new file mode 100644 index 00000000000..c68d91b84db --- /dev/null +++ b/build/cmake/CMakeModules/JoinPaths.cmake @@ -0,0 +1,23 @@ +# This module provides function for joining paths +# known from most languages +# +# SPDX-License-Identifier: (MIT OR CC0-1.0) +# Copyright 2020 Jan Tojnar +# https://github.com/jtojnar/cmake-snips +# +# Modelled after Python’s os.path.join +# https://docs.python.org/3.7/library/os.path.html#os.path.join +# Windows not supported +function(join_paths joined_path first_path_segment) + set(temp_path "${first_path_segment}") + foreach(current_segment IN LISTS ARGN) + if(NOT ("${current_segment}" STREQUAL "")) + if(IS_ABSOLUTE "${current_segment}") + set(temp_path 
"${current_segment}") + else() + set(temp_path "${temp_path}/${current_segment}") + endif() + endif() + endforeach() + set(${joined_path} "${temp_path}" PARENT_SCOPE) +endfunction() diff --git a/build/cmake/README.md b/build/cmake/README.md index 73b30dc773b..a460dd16187 100644 --- a/build/cmake/README.md +++ b/build/cmake/README.md @@ -1,13 +1,13 @@ # Cmake contributions Contributions to the cmake build configurations are welcome. Please -use case sensitivity that matches modern (ie. cmake version 2.6 and above) +use case sensitivity that matches modern (i.e. cmake version 2.6 and above) conventions of using lower-case for commands, and upper-case for variables. ## How to build -As cmake doesn't support command like `cmake clean`, it's recommended to perform a "out of source build". +As cmake doesn't support command like `cmake clean`, it's recommended to perform an "out of source build". To do this, you can create a new directory and build in it: ```sh cd build/cmake @@ -37,7 +37,7 @@ cmake -LH .. Bool options can be set to `ON/OFF` with `-D[option]=[ON/OFF]`. You can configure cmake options like this: ```sh cd build/cmake/builddir -cmake -DZSTD_BUILD_TESTS=ON -DZSTD_LEGACY_SUPPORT=ON .. +cmake -DZSTD_BUILD_TESTS=ON -DZSTD_LEGACY_SUPPORT=OFF .. make ``` diff --git a/build/cmake/contrib/CMakeLists.txt b/build/cmake/contrib/CMakeLists.txt index f7631d08ca6..8df2a17b3a8 100644 --- a/build/cmake/contrib/CMakeLists.txt +++ b/build/cmake/contrib/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/build/cmake/contrib/gen_html/CMakeLists.txt b/build/cmake/contrib/gen_html/CMakeLists.txt index 8fdd61131ea..d1ff6c64bba 100644 --- a/build/cmake/contrib/gen_html/CMakeLists.txt +++ b/build/cmake/contrib/gen_html/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/build/cmake/contrib/pzstd/CMakeLists.txt b/build/cmake/contrib/pzstd/CMakeLists.txt index 27af86c88dd..f7098fa0f7f 100644 --- a/build/cmake/contrib/pzstd/CMakeLists.txt +++ b/build/cmake/contrib/pzstd/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/build/cmake/lib/CMakeLists.txt b/build/cmake/lib/CMakeLists.txt index 4905bd91338..30349586ba9 100644 --- a/build/cmake/lib/CMakeLists.txt +++ b/build/cmake/lib/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -77,6 +77,12 @@ if (MSVC) set(PlatformDependResources ${MSVC_RESOURCE_DIR}/libzstd-dll.rc) endif () +# Explicitly set the language to C for all files, including ASM files. 
+# Our assembly expects to be compiled by a C compiler, and is only enabled for +# __GNUC__ compatible compilers. Otherwise all the ASM code is disabled by +# macros. +set_source_files_properties(${Sources} PROPERTIES LANGUAGE C) + # Split project to static and shared libraries build set(library_targets) if (ZSTD_BUILD_SHARED) @@ -135,30 +141,17 @@ if (ZSTD_BUILD_STATIC) OUTPUT_NAME ${STATIC_LIBRARY_BASE_NAME}) endif () -if (UNIX OR MINGW) - # pkg-config - set(PREFIX "${CMAKE_INSTALL_PREFIX}") - set(EXEC_PREFIX "\${prefix}") - set(LIBDIR "${CMAKE_INSTALL_FULL_LIBDIR}") - set(INCLUDEDIR "${CMAKE_INSTALL_FULL_INCLUDEDIR}") - set(VERSION "${zstd_VERSION}") - - string(LENGTH "${PREFIX}" PREFIX_LENGTH) - string(SUBSTRING "${LIBDIR}" 0 ${PREFIX_LENGTH} LIBDIR_PREFIX) - string(SUBSTRING "${LIBDIR}" ${PREFIX_LENGTH} -1 LIBDIR_SUFFIX) - string(SUBSTRING "${INCLUDEDIR}" 0 ${PREFIX_LENGTH} INCLUDEDIR_PREFIX) - string(SUBSTRING "${INCLUDEDIR}" ${PREFIX_LENGTH} -1 INCLUDEDIR_SUFFIX) - - if ("${INCLUDEDIR_PREFIX}" STREQUAL "${PREFIX}") - set(INCLUDEDIR "\${prefix}${INCLUDEDIR_SUFFIX}") - endif() - if ("${LIBDIR_PREFIX}" STREQUAL "${PREFIX}") - set(LIBDIR "\${exec_prefix}${LIBDIR_SUFFIX}") - endif() - - configure_file("${LIBRARY_DIR}/libzstd.pc.in" "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" @ONLY) - install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") -endif () +# pkg-config +include(JoinPaths) # can be replaced by cmake_path(APPEND) in CMake 3.20 +set(PREFIX "${CMAKE_INSTALL_PREFIX}") +set(EXEC_PREFIX "\${prefix}") +join_paths(LIBDIR "\${exec_prefix}" "${CMAKE_INSTALL_LIBDIR}") +join_paths(INCLUDEDIR "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}") +set(LIBS_PRIVATE "${THREADS_LIBS}") +set(VERSION "${zstd_VERSION}") + +configure_file("${LIBRARY_DIR}/libzstd.pc.in" "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" @ONLY) +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") # install target install(FILES diff --git a/build/cmake/programs/CMakeLists.txt b/build/cmake/programs/CMakeLists.txt index 490030783d3..58d998e4275 100644 --- a/build/cmake/programs/CMakeLists.txt +++ b/build/cmake/programs/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -32,7 +32,7 @@ if (MSVC) set(PlatformDependResources ${MSVC_RESOURCE_DIR}/zstd.rc) endif () -add_executable(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PROGRAMS_DIR}/zstdcli_trace.c ${PlatformDependResources}) +add_executable(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/fileio_asyncio.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PROGRAMS_DIR}/zstdcli_trace.c ${PlatformDependResources}) target_link_libraries(zstd ${PROGRAMS_ZSTD_LINK_TARGET}) if (CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)") target_link_libraries(zstd rt) @@ -75,7 +75,7 @@ if (UNIX) ${CMAKE_CURRENT_BINARY_DIR}/zstdless.1 DESTINATION "${MAN_INSTALL_DIR}") - add_executable(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c) + add_executable(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/fileio_asyncio.c) target_link_libraries(zstd-frugal ${PROGRAMS_ZSTD_LINK_TARGET}) set_property(TARGET zstd-frugal APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT;ZSTD_NOTRACE") endif () diff --git a/build/cmake/tests/CMakeLists.txt b/build/cmake/tests/CMakeLists.txt index 8bba6ea65a6..250f0508f37 100644 --- a/build/cmake/tests/CMakeLists.txt +++ b/build/cmake/tests/CMakeLists.txt @@ -1,6 +1,6 @@ # ################################################################ # zstd - Makefile -# Copyright (C) Yann Collet 2014-present +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # BSD license @@ -27,7 +27,7 @@ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # You can contact the author at : -# - zstd homepage : http://www.zstd.net/ +# - zstd homepage : https://facebook.github.io/zstd/ # ################################################################ project(tests) @@ -57,7 +57,9 @@ target_link_libraries(datagen libzstd_static) # fullbench # add_executable(fullbench ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${TESTS_DIR}/fullbench.c) -set_property(TARGET fullbench APPEND PROPERTY COMPILE_OPTIONS "-Wno-deprecated-declarations") +if (NOT MSVC) + target_compile_options(fullbench PRIVATE "-Wno-deprecated-declarations") +endif() target_link_libraries(fullbench libzstd_static) add_test(NAME fullbench COMMAND fullbench ${ZSTD_FULLBENCH_FLAGS}) @@ -65,7 +67,9 @@ add_test(NAME fullbench COMMAND fullbench ${ZSTD_FULLBENCH_FLAGS}) # fuzzer # add_executable(fuzzer ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/fuzzer.c) -set_property(TARGET fuzzer APPEND PROPERTY COMPILE_OPTIONS "-Wno-deprecated-declarations") +if (NOT MSVC) + target_compile_options(fuzzer PRIVATE "-Wno-deprecated-declarations") +endif() target_link_libraries(fuzzer libzstd_static) AddTestFlagsOption(ZSTD_FUZZER_FLAGS "$ENV{FUZZERTEST} $ENV{FUZZER_FLAGS}" "Semicolon-separated list of flags to pass to the fuzzer test (see `fuzzer -h` for usage)") @@ -77,8 +81,10 @@ add_test(NAME fuzzer COMMAND fuzzer ${ZSTD_FUZZER_FLAGS}) # # zstreamtest # -add_executable(zstreamtest ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/seqgen.c ${TESTS_DIR}/zstreamtest.c) -set_property(TARGET zstreamtest APPEND PROPERTY COMPILE_OPTIONS "-Wno-deprecated-declarations") +add_executable(zstreamtest ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/seqgen.c ${TESTS_DIR}/zstreamtest.c ${TESTS_DIR}/external_matchfinder.c) +if (NOT MSVC) + target_compile_options(zstreamtest PRIVATE "-Wno-deprecated-declarations") +endif() target_link_libraries(zstreamtest libzstd_static) AddTestFlagsOption(ZSTD_ZSTREAM_FLAGS "$ENV{ZSTREAM_TESTTIME} $ENV{FUZZER_FLAGS}" "Semicolon-separated list of flags to pass to the zstreamtest test (see `zstreamtest -h` for usage)") @@ -90,13 +96,14 @@ add_test(NAME zstreamtest COMMAND zstreamtest ${ZSTD_ZSTREAM_FLAGS}) AddTestFlagsOption(ZSTD_PLAYTESTS_FLAGS "$ENV{PLAYTESTS_FLAGS}" "Semicolon-separated list of flags to pass to the playTests.sh test") add_test(NAME playTests COMMAND sh -c "\"${TESTS_DIR}/playTests.sh\" ${ZSTD_PLAYTESTS_FLAGS}") -if (ZSTD_BUILD_PROGRAMS) +find_program(UNAME uname) # Run script only in unix shell environments +if (ZSTD_BUILD_PROGRAMS AND UNAME) set_property(TEST playTests APPEND PROPERTY ENVIRONMENT "ZSTD_BIN=$" "DATAGEN_BIN=$" ) else() - message(STATUS "Disabling playTests.sh test because ZSTD_BUILD_PROGRAMS is not enabled") + message(STATUS "Disabling playTests.sh test because requirements not met") set_tests_properties(playTests PROPERTIES DISABLED YES) endif() diff --git a/build/meson/contrib/pzstd/meson.build b/build/meson/contrib/pzstd/meson.build index 2c47999fa27..b95dc796463 100644 --- a/build/meson/contrib/pzstd/meson.build +++ b/build/meson/contrib/pzstd/meson.build @@ -18,7 +18,8 @@ pzstd_sources = [join_paths(zstd_rootdir, 'programs/util.c'), join_paths(zstd_rootdir, 'contrib/pzstd/SkippableFrame.cpp')] pzstd = executable('pzstd', pzstd_sources, - cpp_args: [ '-DNDEBUG', '-Wno-shadow', '-Wno-deprecated-declarations' ], + cpp_args: pzstd_warning_flags, 
include_directories: pzstd_includes, dependencies: [ libzstd_dep, thread_dep ], + override_options: ['b_ndebug=true'], install: true) diff --git a/build/meson/lib/meson.build b/build/meson/lib/meson.build index 6b093378101..68db2ca6cbb 100644 --- a/build/meson/lib/meson.build +++ b/build/meson/lib/meson.build @@ -47,7 +47,7 @@ libzstd_sources = [join_paths(zstd_rootdir, 'lib/common/entropy_common.c'), # really we need anything that defines __GNUC__ as that is what ZSTD_ASM_SUPPORTED is gated on # but these are the two compilers that are supported in tree and actually handle this correctly -# Otherwise, explicitly disable assmebly. +# Otherwise, explicitly disable assembly. if [compiler_gcc, compiler_clang].contains(cc_id) libzstd_sources += join_paths(zstd_rootdir, 'lib/decompress/huf_decompress_amd64.S') else @@ -83,7 +83,8 @@ libzstd_c_args = [] if cc_id == compiler_msvc if default_library_type != 'static' libzstd_sources += [windows_mod.compile_resources( - join_paths(zstd_rootdir, 'build/VS2010/libzstd-dll/libzstd-dll.rc'))] + join_paths(zstd_rootdir, 'build/VS2010/libzstd-dll/libzstd-dll.rc'), + include_directories: libzstd_includes)] libzstd_c_args += ['-DZSTD_DLL_EXPORT=1', '-DZSTD_HEAPMODE=0', '-D_CONSOLE', @@ -125,12 +126,41 @@ libzstd = library('zstd', libzstd_dep = declare_dependency(link_with: libzstd, include_directories: libzstd_includes) +# we link to both: +# - the shared library (for public symbols) +# - the static library (for private symbols) +# +# this is needed because internally private symbols are used all the time, and +# -fvisibility=hidden means those cannot be found +if get_option('default_library') == 'static' + libzstd_static = libzstd + libzstd_internal_dep = libzstd_dep +else + if get_option('default_library') == 'shared' + libzstd_static = static_library('zstd_objlib', + objects: libzstd.extract_all_objects(recursive: true), + build_by_default: false) + else + libzstd_static = libzstd.get_static_lib() + endif + + if cc_id == compiler_msvc + # msvc does not actually support linking to both, but errors out with: + # error LNK2005: ZSTD_ already defined in zstd.lib(zstd-1.dll) + libzstd_internal_dep = declare_dependency(link_with: libzstd_static) + else + libzstd_internal_dep = declare_dependency(link_with: libzstd, + # the static library must be linked after the shared one + dependencies: declare_dependency(link_with: libzstd_static)) + endif +endif + pkgconfig.generate(libzstd, name: 'libzstd', filebase: 'libzstd', description: 'fast lossless compression algorithm library', version: zstd_libversion, - url: 'http://www.zstd.net/') + url: 'https://facebook.github.io/zstd/') install_headers(join_paths(zstd_rootdir, 'lib/zstd.h'), join_paths(zstd_rootdir, 'lib/zdict.h'), diff --git a/build/meson/meson.build b/build/meson/meson.build index 0c29a7621b9..576dc44db37 100644 --- a/build/meson/meson.build +++ b/build/meson/meson.build @@ -12,7 +12,10 @@ project('zstd', ['c', 'cpp'], license: ['BSD', 'GPLv2'], default_options : [ - 'c_std=gnu99', + # There shouldn't be any need to force a C standard convention for zstd + # but in case one would want that anyway, this can be done here. 
+ # 'c_std=gnu99', + # c++11 standard is useful for pzstd 'cpp_std=c++11', 'buildtype=release', 'warning_level=3', @@ -20,8 +23,10 @@ project('zstd', # so this isn't safe #'werror=true' ], - version: 'DUMMY', - meson_version: '>=0.48.0') + version: run_command( + find_program('GetZstdLibraryVersion.py'), '../../lib/zstd.h', + check: true).stdout().strip(), + meson_version: '>=0.50.0') cc = meson.get_compiler('c') cxx = meson.get_compiler('cpp') @@ -42,16 +47,6 @@ compiler_msvc = 'msvc' zstd_version = meson.project_version() -zstd_h_file = join_paths(meson.current_source_dir(), '../../lib/zstd.h') -GetZstdLibraryVersion_py = find_program('GetZstdLibraryVersion.py', native : true) -r = run_command(GetZstdLibraryVersion_py, zstd_h_file) -if r.returncode() == 0 - zstd_version = r.stdout().strip() - message('Project version is now: @0@'.format(zstd_version)) -else - error('Cannot find project version in @0@'.format(zstd_h_file)) -endif - zstd_libversion = zstd_version # ============================================================================= @@ -92,7 +87,7 @@ feature_lz4 = get_option('lz4') # Dependencies # ============================================================================= -libm_dep = cc.find_library('m', required: bin_tests) +libm_dep = cc.find_library('m', required: false) thread_dep = dependency('threads', required: feature_multi_thread) use_multi_thread = thread_dep.found() # Arguments in dependency should be equivalent to those passed to pkg-config @@ -109,8 +104,10 @@ use_lz4 = lz4_dep.found() add_project_arguments('-DXXH_NAMESPACE=ZSTD_', language: ['c']) +pzstd_warning_flags = [] if [compiler_gcc, compiler_clang].contains(cc_id) common_warning_flags = [ '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual' ] + pzstd_warning_flags = ['-Wno-shadow', '-Wno-deprecated-declarations'] if cc_id == compiler_clang common_warning_flags += ['-Wconversion', '-Wno-sign-conversion', '-Wdocumentation'] endif diff --git a/build/meson/meson_options.txt b/build/meson/meson_options.txt index accf3fa1035..f35cd5fc8b1 100644 --- a/build/meson/meson_options.txt +++ b/build/meson/meson_options.txt @@ -14,7 +14,7 @@ option('legacy_level', type: 'integer', min: 0, max: 7, value: 5, description: 'Support any legacy format: 7 to 1 for v0.7+ to v0.1+') option('debug_level', type: 'integer', min: 0, max: 9, value: 1, description: 'Enable run-time debug. 
See lib/common/debug.h') -option('backtrace', type: 'boolean', value: false, +option('backtrace', type: 'feature', value: 'disabled', description: 'Display a stack backtrace when execution generates a runtime exception') option('static_runtime', type: 'boolean', value: false, description: 'Link to static run-time libraries on MSVC') diff --git a/build/meson/programs/meson.build b/build/meson/programs/meson.build index 4181030c2ee..e611dc3374c 100644 --- a/build/meson/programs/meson.build +++ b/build/meson/programs/meson.build @@ -14,20 +14,26 @@ zstd_programs_sources = [join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/util.c'), join_paths(zstd_rootdir, 'programs/timefn.c'), join_paths(zstd_rootdir, 'programs/fileio.c'), + join_paths(zstd_rootdir, 'programs/fileio_asyncio.c'), join_paths(zstd_rootdir, 'programs/benchfn.c'), join_paths(zstd_rootdir, 'programs/benchzstd.c'), join_paths(zstd_rootdir, 'programs/datagen.c'), join_paths(zstd_rootdir, 'programs/dibio.c'), - join_paths(zstd_rootdir, 'programs/zstdcli_trace.c'), - # needed due to use of private symbol + -fvisibility=hidden - join_paths(zstd_rootdir, 'lib/common/xxhash.c')] + join_paths(zstd_rootdir, 'programs/zstdcli_trace.c')] +zstd_deps = [ libzstd_internal_dep ] zstd_c_args = libzstd_debug_cflags + +zstd_frugal_deps = [ libzstd_internal_dep ] +zstd_frugal_c_args = [ '-DZSTD_NOBENCH', '-DZSTD_NODICT', '-DZSTD_NOTRACE' ] + if use_multi_thread + zstd_deps += [ thread_dep ] zstd_c_args += [ '-DZSTD_MULTITHREAD' ] + zstd_frugal_deps += [ thread_dep ] + zstd_frugal_c_args += [ '-DZSTD_MULTITHREAD' ] endif -zstd_deps = [ libzstd_dep ] if use_zlib zstd_deps += [ zlib_dep ] zstd_c_args += [ '-DZSTD_GZCOMPRESS', '-DZSTD_GZDECOMPRESS' ] @@ -45,7 +51,8 @@ endif export_dynamic_on_windows = false # explicit backtrace enable/disable for Linux & Darwin -if not use_backtrace +have_execinfo = cc.has_header('execinfo.h', required: use_backtrace) +if not have_execinfo zstd_c_args += '-DBACKTRACE_ENABLE=0' elif use_debug and host_machine_os == os_windows # MinGW target zstd_c_args += '-DBACKTRACE_ENABLE=1' @@ -55,7 +62,8 @@ endif if cc_id == compiler_msvc if default_library_type != 'static' zstd_programs_sources += [windows_mod.compile_resources( - join_paths(zstd_rootdir, 'build/VS2010/zstd/zstd.rc'))] + join_paths(zstd_rootdir, 'build/VS2010/zstd/zstd.rc'), + include_directories: libzstd_includes)] endif endif @@ -69,14 +77,15 @@ zstd = executable('zstd', zstd_frugal_sources = [join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/timefn.c'), join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/fileio.c')] + join_paths(zstd_rootdir, 'programs/fileio.c'), + join_paths(zstd_rootdir, 'programs/fileio_asyncio.c')] # Minimal target, with only zstd compression and decompression. # No bench. No legacy. 
executable('zstd-frugal', zstd_frugal_sources, - dependencies: libzstd_dep, - c_args: [ '-DZSTD_NOBENCH', '-DZSTD_NODICT', '-DZSTD_NOTRACE' ], + dependencies: zstd_frugal_deps, + c_args: zstd_frugal_c_args, install: true) install_data(join_paths(zstd_rootdir, 'programs/zstdgrep'), diff --git a/build/meson/tests/meson.build b/build/meson/tests/meson.build index 14f45982ab1..e70b73432c5 100644 --- a/build/meson/tests/meson.build +++ b/build/meson/tests/meson.build @@ -21,7 +21,6 @@ FUZZER_FLAGS = ['--no-big-tests'] FUZZERTEST = '-T200s' ZSTREAM_TESTTIME = '-T90s' DECODECORPUS_TESTTIME = '-T30' -ZSTDRTTEST = ['--test-large-data'] # ============================================================================= # Executables @@ -38,7 +37,7 @@ testcommon_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), testcommon = static_library('testcommon', testcommon_sources, # needed due to use of private symbol + -fvisibility=hidden - objects: libzstd.extract_all_objects(recursive: false)) + link_with: libzstd_static) testcommon_dep = declare_dependency(link_with: testcommon, dependencies: libzstd_deps, @@ -66,8 +65,10 @@ fuzzer = executable('fuzzer', dependencies: [ testcommon_dep, thread_dep ], install: false) -zstreamtest_sources = [join_paths(zstd_rootdir, 'tests/seqgen.c'), - join_paths(zstd_rootdir, 'tests/zstreamtest.c')] +zstreamtest_sources = [ + join_paths(zstd_rootdir, 'tests/seqgen.c'), + join_paths(zstd_rootdir, 'tests/zstreamtest.c'), + join_paths(zstd_rootdir, 'tests/external_matchfinder.c')] zstreamtest = executable('zstreamtest', zstreamtest_sources, include_directories: test_includes, @@ -116,11 +117,7 @@ decodecorpus = executable('decodecorpus', dependencies: [ testcommon_dep, libm_dep ], install: false) -poolTests_sources = [join_paths(zstd_rootdir, 'tests/poolTests.c'), - join_paths(zstd_rootdir, 'lib/common/pool.c'), - join_paths(zstd_rootdir, 'lib/common/threading.c'), - join_paths(zstd_rootdir, 'lib/common/zstd_common.c'), - join_paths(zstd_rootdir, 'lib/common/error_private.c')] +poolTests_sources = [join_paths(zstd_rootdir, 'tests/poolTests.c')] poolTests = executable('poolTests', poolTests_sources, include_directories: test_includes, @@ -138,24 +135,38 @@ checkTag = executable('checkTag', # ============================================================================= if tests_supported_oses.contains(host_machine_os) - valgrind_prog = find_program('valgrind', ['/usr/bin/valgrind'], required: true) + valgrind_prog = find_program('valgrind', ['/usr/bin/valgrind'], required: false) valgrindTest_py = files('valgrindTest.py') - test('valgrindTest', - valgrindTest_py, - args: [valgrind_prog.path(), zstd, datagen, fuzzer, fullbench], - depends: [zstd, datagen, fuzzer, fullbench], - timeout: 600) # Timeout should work on HDD drive + if valgrind_prog.found() + test('valgrindTest', + valgrindTest_py, + args: [valgrind_prog.path(), zstd, datagen, fuzzer, fullbench], + depends: [zstd, datagen, fuzzer, fullbench], + timeout: 600) # Timeout should work on HDD drive + endif endif if host_machine_os != os_windows playTests_sh = find_program(join_paths(zstd_rootdir, 'tests/playTests.sh'), required: true) - test('test-zstd', - playTests_sh, - args: ZSTDRTTEST, - env: ['ZSTD_BIN=' + zstd.full_path(), 'DATAGEN_BIN=./datagen'], - depends: [datagen], - workdir: meson.current_build_dir(), - timeout: 2800) # Timeout should work on HDD drive + + # add slow tests only if the meson version is new enough to support + # test setups with default-excluded suites + if 
meson.version().version_compare('>=0.57.0') + matrix = {'fast': [], 'slow': ['--test-large-data']} + else + matrix = {'fast': []} + endif + + foreach suite, opt: matrix + test('test-zstd-'+suite, + playTests_sh, + args: opt, + env: ['ZSTD_BIN=' + zstd.full_path(), 'DATAGEN_BIN=./datagen'], + depends: [datagen], + suite: suite, + workdir: meson.current_build_dir(), + timeout: 2800) # Timeout should work on HDD drive + endforeach endif test('test-fullbench-1', @@ -194,3 +205,11 @@ test('test-decodecorpus', args: ['-t', DECODECORPUS_TESTTIME], timeout: 60) test('test-poolTests', poolTests) # should be fast + +if meson.version().version_compare('>=0.57.0') + add_test_setup('fast', + is_default: true, + exclude_suites: ['slow']) + add_test_setup('slow', + exclude_suites: ['fast']) +endif diff --git a/build/meson/tests/valgrindTest.py b/build/meson/tests/valgrindTest.py index 218f7458bbf..05d84878b9d 100644 --- a/build/meson/tests/valgrindTest.py +++ b/build/meson/tests/valgrindTest.py @@ -21,7 +21,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): if subprocess.call([*VALGRIND_ARGS, zstd], stdout=subprocess.DEVNULL) == 0: - raise subprocess.CalledProcessError('zstd without argument should have failed') + raise subprocess.SubprocessError('zstd without argument should have failed') with subprocess.Popen([datagen, '-g80'], stdout=subprocess.PIPE) as p1, \ subprocess.Popen([*VALGRIND_ARGS, zstd, '-', '-c'], @@ -30,7 +30,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() with subprocess.Popen([datagen, '-g16KB'], stdout=subprocess.PIPE) as p1, \ subprocess.Popen([*VALGRIND_ARGS, zstd, '-vf', '-', '-c'], @@ -39,7 +39,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() with tempfile.NamedTemporaryFile() as tmp_fd: with subprocess.Popen([datagen, '-g2930KB'], stdout=subprocess.PIPE) as p1, \ @@ -48,7 +48,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() subprocess.check_call([*VALGRIND_ARGS, zstd, '-vdf', tmp_fd.name, '-c'], stdout=subprocess.DEVNULL) @@ -60,7 +60,7 @@ def valgrindTest(valgrind, datagen, fuzzer, zstd, fullbench): p1.stdout.close() p2.communicate() if p2.returncode != 0: - raise subprocess.CalledProcessError() + raise subprocess.SubprocessError() subprocess.check_call([*VALGRIND_ARGS, fuzzer, '-T1mn', '-t1']) subprocess.check_call([*VALGRIND_ARGS, fullbench, '-i1']) diff --git a/build/single_file_libs/README.md b/build/single_file_libs/README.md index 1705b769b85..64c973a68d6 100644 --- a/build/single_file_libs/README.md +++ b/build/single_file_libs/README.md @@ -12,7 +12,7 @@ This is the most common use case. The decompression library is small, adding, fo Create `zstddeclib.c` from the Zstd source using: ``` cd zstd/build/single_file_libs -./combine.sh -r ../../lib -o zstddeclib.c zstddeclib-in.c +python3 combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c ``` Then add the resulting file to your project (see the [example files](examples)). 
@@ -26,7 +26,7 @@ The same tool can amalgamate the entire Zstd library for ease of adding both com Create `zstd.c` from the Zstd source using: ``` cd zstd/build/single_file_libs -./combine.sh -r ../../lib -o zstd.c zstd-in.c +python3 combine.py -r ../../lib -x legacy/zstd_legacy.h -k zstd.h -o zstd.c zstd-in.c ``` It's possible to create a compressor-only library but since the decompressor is so small in comparison this doesn't bring much of a gain (but for the curious, simply remove the files in the _decompress_ section at the end of `zstd-in.c`). diff --git a/build/single_file_libs/build_decoder_test.sh b/build/single_file_libs/build_decoder_test.sh index 48d017fcffa..c4ca55fa5dd 100755 --- a/build/single_file_libs/build_decoder_test.sh +++ b/build/single_file_libs/build_decoder_test.sh @@ -11,7 +11,7 @@ IN_FILES="examples/emscripten.c" # Emscripten build using emcc. emscripten_emcc_build() { - # Compile the the same example as above + # Compile the same example as above CC_FLAGS="-Wall -Wextra -Wshadow -Werror -Os -g0 -flto" emcc $CC_FLAGS -s WASM=1 -I. -o $OUT_WASM $IN_FILES # Did compilation work? diff --git a/build/single_file_libs/build_library_test.sh b/build/single_file_libs/build_library_test.sh index 7fb99656bcc..f4ba109ab38 100755 --- a/build/single_file_libs/build_library_test.sh +++ b/build/single_file_libs/build_library_test.sh @@ -14,7 +14,7 @@ IN_FILES="zstd.c examples/roundtrip.c" # Emscripten build using emcc. emscripten_emcc_build() { - # Compile the the same example as above + # Compile the same example as above CC_FLAGS="-Wall -Wextra -Wshadow -Werror -Os -g0 -flto" emcc $CC_FLAGS -s WASM=1 -I. -o $OUT_WASM $IN_FILES # Did compilation work? @@ -69,7 +69,7 @@ fi echo "Single file library creation script: PASSED" # Copy the header to here (for the tests) -cp "$ZSTD_SRC_ROOT/zstd.h" zstd.h +cp "$ZSTD_SRC_ROOT/zstd.h" examples/zstd.h # Compile the generated output cc -Wall -Wextra -Werror -Wshadow -pthread -I. -Os -g0 -o $OUT_FILE zstd.c examples/roundtrip.c diff --git a/build/single_file_libs/combine.py b/build/single_file_libs/combine.py new file mode 100755 index 00000000000..771dd20bfb1 --- /dev/null +++ b/build/single_file_libs/combine.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 + +# Tool to bundle multiple C/C++ source files, inlining any includes. +# +# Note: there are two types of exclusion options: the '-x' flag, which besides +# excluding a file also adds an #error directive in place of the #include, and +# the '-k' flag, which keeps the #include and doesn't inline the file. The +# intended use cases are: '-x' for files that would normally be #if'd out, so +# features that 100% won't be used in the amalgamated file, for which every +# occurrence adds the error, and '-k' for headers that we wish to manually +# include, such as a project's public API, for which occurrences after the first +# are removed. +# +# Todo: the error handling could be better, which currently throws and halts +# (which is functional just not very friendly). +# +# Author: Carl Woffenden, Numfum GmbH (this script is released under a CC0 license/Public Domain) + +import argparse, re, sys + +from pathlib import Path +from typing import Any, List, Optional, Pattern, Set, TextIO + +# Set of file roots when searching (equivalent to -I paths for the compiler). +roots: Set[Path] = set() + +# Set of (canonical) file Path objects to exclude from inlining (and not only +# exclude but to add a compiler error directive when they're encountered). 
+excludes: Set[Path] = set() + +# Set of (canonical) file Path objects to keep as include directives. +keeps: Set[Path] = set() + +# Whether to keep the #pragma once directives (unlikely, since this will result +# in a warning, but the option is there). +keep_pragma: bool = False + +# Destination file object (or stdout if no output file was supplied). +destn: TextIO = sys.stdout + +# Set of file Path objects previously inlined (and to ignore if reencountering). +found: Set[Path] = set() + +# Compiled regex Pattern to handle "#pragma once" in various formats: +# +# #pragma once +# #pragma once +# # pragma once +# #pragma once +# #pragma once // comment +# +# Ignoring commented versions, same as include_regex. +# +pragma_regex: Pattern = re.compile(r'^\s*#\s*pragma\s*once\s*') + +# Compiled regex Pattern to handle the following type of file includes: +# +# #include "file" +# #include "file" +# # include "file" +# #include "file" +# #include "file" // comment +# #include "file" // comment with quote " +# +# And all combinations of, as well as ignoring the following: +# +# #include +# //#include "file" +# /*#include "file"*/ +# +# We don't try to catch errors since the compiler will do this (and the code is +# expected to be valid before processing) and we don't care what follows the +# file (whether it's a valid comment or not, since anything after the quoted +# string is ignored) +# +include_regex: Pattern = re.compile(r'^\s*#\s*include\s*"(.+?)"') + +# Simple tests to prove include_regex's cases. +# +def test_match_include() -> bool: + if (include_regex.match('#include "file"') and + include_regex.match(' #include "file"') and + include_regex.match('# include "file"') and + include_regex.match('#include "file"') and + include_regex.match('#include "file" // comment')): + if (not include_regex.match('#include ') and + not include_regex.match('//#include "file"') and + not include_regex.match('/*#include "file"*/')): + found = include_regex.match('#include "file" // "') + if (found and found.group(1) == 'file'): + print('#include match valid') + return True + return False + +# Simple tests to prove pragma_regex's cases. +# +def test_match_pragma() -> bool: + if (pragma_regex.match('#pragma once') and + pragma_regex.match(' #pragma once') and + pragma_regex.match('# pragma once') and + pragma_regex.match('#pragma once') and + pragma_regex.match('#pragma once // comment')): + if (not pragma_regex.match('//#pragma once') and + not pragma_regex.match('/*#pragma once*/')): + print('#pragma once match valid') + return True + return False + +# Finds 'file'. First the list of 'root' paths are searched, followed by the +# currently processing file's 'parent' path, returning a valid Path in +# canonical form. If no match is found None is returned. +# +def resolve_include(file: str, parent: Optional[Path] = None) -> Optional[Path]: + for root in roots: + found = root.joinpath(file).resolve() + if (found.is_file()): + return found + if (parent): + found = parent.joinpath(file).resolve(); + else: + found = Path(file) + if (found.is_file()): + return found + return None + +# Helper to resolve lists of files. 'file_list' is passed in from the arguments +# and each entry resolved to its canonical path (like any include entry, either +# from the list of root paths or the owning file's 'parent', which in this case +# is case is the input file). The results are stored in 'resolved'. 
+# +def resolve_excluded_files(file_list: Optional[List[str]], resolved: Set[Path], parent: Optional[Path] = None) -> None: + if (file_list): + for filename in file_list: + found = resolve_include(filename, parent) + if (found): + resolved.add(found) + else: + error_line(f'Warning: excluded file not found: {filename}') + +# Writes 'line' to the open 'destn' (or stdout). +# +def write_line(line: str) -> None: + print(line, file=destn) + +# Logs 'line' to stderr. This is also used for general notifications that we +# don't want to go to stdout (so the source can be piped). +# +def error_line(line: Any) -> None: + print(line, file=sys.stderr) + +# Inline the contents of 'file' (with any of its includes also inlined, etc.). +# +# Note: text encoding errors are ignored and replaced with ? when reading the +# input files. This isn't ideal, but it's more than likely in the comments than +# code and a) the text editor has probably also failed to read the same content, +# and b) the compiler probably did too. +# +def add_file(file: Path, file_name: str = None) -> None: + if (file.is_file()): + if (not file_name): + file_name = file.name + error_line(f'Processing: {file_name}') + with file.open('r', errors='replace') as opened: + for line in opened: + line = line.rstrip('\n') + match_include = include_regex.match(line); + if (match_include): + # We have a quoted include directive so grab the file + inc_name = match_include.group(1) + resolved = resolve_include(inc_name, file.parent) + if (resolved): + if (resolved in excludes): + # The file was excluded so error if the compiler uses it + write_line(f'#error Using excluded file: {inc_name} (re-amalgamate source to fix)') + error_line(f'Excluding: {inc_name}') + else: + if (resolved not in found): + # The file was not previously encountered + found.add(resolved) + if (resolved in keeps): + # But the include was flagged to keep as included + write_line(f'/**** *NOT* inlining {inc_name} ****/') + write_line(line) + error_line(f'Not inlining: {inc_name}') + else: + # The file was neither excluded nor seen before so inline it + write_line(f'/**** start inlining {inc_name} ****/') + add_file(resolved, inc_name) + write_line(f'/**** ended inlining {inc_name} ****/') + else: + write_line(f'/**** skipping file: {inc_name} ****/') + else: + # The include file didn't resolve to a file + write_line(f'#error Unable to find: {inc_name}') + error_line(f'Error: Unable to find: {inc_name}') + else: + # Skip any 'pragma once' directives, otherwise write the source line + if (keep_pragma or not pragma_regex.match(line)): + write_line(line) + else: + error_line(f'Error: Invalid file: {file}') + +# Start here +parser = argparse.ArgumentParser(description='Amalgamate Tool', epilog=f'example: {sys.argv[0]} -r ../my/path -r ../other/path -o out.c in.c') +parser.add_argument('-r', '--root', action='append', type=Path, help='file root search path') +parser.add_argument('-x', '--exclude', action='append', help='file to completely exclude from inlining') +parser.add_argument('-k', '--keep', action='append', help='file to exclude from inlining but keep the include directive') +parser.add_argument('-p', '--pragma', action='store_true', default=False, help='keep any "#pragma once" directives (removed by default)') +parser.add_argument('-o', '--output', type=argparse.FileType('w'), help='output file (otherwise stdout)') +parser.add_argument('input', type=Path, help='input file') +args = parser.parse_args() + +# Fail early on an invalid input (and store it so we don't recurse) 
+args.input = args.input.resolve(strict=True) +found.add(args.input) + +# Resolve all of the root paths upfront (we'll halt here on invalid roots) +if (args.root): + for path in args.root: + roots.add(path.resolve(strict=True)) + +# The remaining params: so resolve the excluded files and #pragma once directive +resolve_excluded_files(args.exclude, excludes, args.input.parent) +resolve_excluded_files(args.keep, keeps, args.input.parent) +keep_pragma = args.pragma; + +# Then recursively process the input file +try: + if (args.output): + destn = args.output + add_file(args.input) +finally: + if (destn): + destn.close() diff --git a/build/single_file_libs/combine.sh b/build/single_file_libs/combine.sh index 8eac4f9eb14..222c2c32a51 100755 --- a/build/single_file_libs/combine.sh +++ b/build/single_file_libs/combine.sh @@ -2,11 +2,7 @@ # Tool to bundle multiple C/C++ source files, inlining any includes. # -# Note: this POSIX-compliant script is many times slower than the original bash -# implementation (due to the grep calls) but it runs and works everywhere. -# # TODO: ROOTS, FOUND, etc., as arrays (since they fail on paths with spaces) -# TODO: revert to Bash-only regex (the grep ones being too slow) # # Author: Carl Woffenden, Numfum GmbH (this script is released under a CC0 license/Public Domain) @@ -52,13 +48,54 @@ test_deps() { fi } +# Test if glob pattern $1 matches subject $2 (see fnmatch(3)) +fnmatch() { + case "$2" in + $1) + return 0 + ;; + esac + return 1 +} + +# Test if line $1 is local include directive +is_include_line() { + fnmatch "*#*include*" "$1" || return 1 + printf "%s\n" "$1" | grep -Eq '^\s*#\s*include\s*".+"' +} + +# Test if line $1 is pragma once directive +is_pragma_once_line() { + fnmatch "*#*pragma*once*" "$1" || return 1 + printf "%s\n" "$1" | grep -Eq '^\s*#\s*pragma\s*once\s*' +} + # Tests if list $1 has item $2 (returning zero on a match) +# (originally used grep -Eq "(^|\s*)$2(\$|\s*)) +readonly list_FS="$IFS" list_has_item() { - if echo "$1" | grep -Eq "(^|\s*)$2(\$|\s*)"; then - return 0 - else - return 1 + # Re: escaping glob pattern special characters in item string: + # + # bash (tested 3.2.57, 5.1.4), dash (tested 0.5.10.2), NetBSD /bin/sh + # (tested 8.2), and Solaris /bin/sh (tested 11.4) require escaping + # backslashes in a bracket expression despite POSIX specifying that + # backslash loses significance in a bracket expression. + # + # Conversely, neither FreeBSD /bin/sh (tested 12.2) nor OpenBSD /bin/sh + # (tested 7.1) obey backslash-escaping in case statement patterns even + # outside bracket expressions, so escape special characters using bracket + # expressions. + # + # Solaris /bin/sh (tested 11.4) requires vertical bar (|) to be escaped. + # + # All accommodations should behave as expected under strict POSIX semantics. 
+ if fnmatch "*[\\*?[|]*" "$2"; then + set -- "$1" "$(printf '%s\n' "$2" | sed -e 's/[*?[|]/[&]/g; s/[\]/[\\&]/g')" fi + for item_P in "*[$list_FS]$2[$list_FS]*" "*[$list_FS]$2" "$2[$list_FS]*" "$2"; do + fnmatch "${item_P}" "$1" && return 0 + done + return 1 } # Adds a new line with the supplied arguments to $DESTN (or stdout) @@ -124,13 +161,13 @@ add_file() { # Read the file local line= while IFS= read -r line; do - if echo "$line" | grep -Eq '^\s*#\s*include\s*".+"'; then + if is_include_line "$line"; then # We have an include directive so strip the (first) file local inc=$(echo "$line" | grep -Eo '".*"' | sed -E 's/"([^"]+)"/\1/' | head -1) local res_inc="$(resolve_include "$srcdir" "$inc")" if list_has_item "$XINCS" "$inc"; then # The file was excluded so error if the source attempts to use it - write_line "#error Using excluded file: $inc" + write_line "#error Using excluded file: $inc (re-amalgamate source to fix)" log_line "Excluding: $inc" else if ! list_has_item "$FOUND" "$res_inc"; then @@ -155,7 +192,7 @@ add_file() { # Skip any 'pragma once' directives, otherwise write the source line local write=$PONCE if [ $write -eq 0 ]; then - if echo "$line" | grep -Eqv '^\s*#\s*pragma\s*once\s*'; then + if ! is_pragma_once_line "$line"; then write=1 fi fi @@ -200,6 +237,7 @@ if [ -n "$1" ]; then printf "" > "$DESTN" fi test_deps + log_line "Processing using the slower shell script; this might take a while" add_file "$1" else echo "Input file not found: \"$1\"" diff --git a/build/single_file_libs/create_single_file_decoder.sh b/build/single_file_libs/create_single_file_decoder.sh index b5f5613ae2e..3c0c577df5f 100755 --- a/build/single_file_libs/create_single_file_decoder.sh +++ b/build/single_file_libs/create_single_file_decoder.sh @@ -4,8 +4,13 @@ ZSTD_SRC_ROOT="../../lib" # Amalgamate the sources -echo "Amalgamating files... this can take a while" -./combine.sh -r "$ZSTD_SRC_ROOT" -o zstddeclib.c zstddeclib-in.c +echo "Amalgamating files..." +# Using the faster Python script if we have 3.8 or higher +if python3 -c 'import sys; assert sys.version_info >= (3,8)' 2>/dev/null; then + ./combine.py -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c +else + ./combine.sh -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c +fi # Did combining work? if [ $? -ne 0 ]; then echo "Combine script: FAILED" diff --git a/build/single_file_libs/create_single_file_library.sh b/build/single_file_libs/create_single_file_library.sh index 6f38526d5bf..a6f71f0f085 100755 --- a/build/single_file_libs/create_single_file_library.sh +++ b/build/single_file_libs/create_single_file_library.sh @@ -4,8 +4,13 @@ ZSTD_SRC_ROOT="../../lib" # Amalgamate the sources -echo "Amalgamating files... this can take a while" -./combine.sh -r "$ZSTD_SRC_ROOT" -o zstd.c zstd-in.c +echo "Amalgamating files..." +# Using the faster Python script if we have 3.8 or higher +if python3 -c 'import sys; assert sys.version_info >= (3,8)' 2>/dev/null; then + ./combine.py -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstd.c zstd-in.c +else + ./combine.sh -r "$ZSTD_SRC_ROOT" -x legacy/zstd_legacy.h -o zstd.c zstd-in.c +fi # Did combining work? if [ $? 
-ne 0 ]; then echo "Combine script: FAILED" diff --git a/build/single_file_libs/zstd-in.c b/build/single_file_libs/zstd-in.c index b694681396a..e6fca9e4a44 100644 --- a/build/single_file_libs/zstd-in.c +++ b/build/single_file_libs/zstd-in.c @@ -4,11 +4,11 @@ * * Generate using: * \code - * combine.sh -r ../../lib -o zstd.c zstd-in.c + * python combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstd.c zstd-in.c * \endcode */ /* - * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,9 +25,13 @@ * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also * defined in mem.h (breaking C99 compatibility). * - * Note: the undefs for xxHash allow Zstd's implementation to coincide with with + * Note: the undefs for xxHash allow Zstd's implementation to coincide with * standalone xxHash usage (with global defines). * + * Note: if you enable ZSTD_LEGACY_SUPPORT the combine.py script will need + * re-running without the "-x legacy/zstd_legacy.h" option (it excludes the + * legacy support at the source level). + * * Note: multithreading is enabled for all platforms apart from Emscripten. */ #define DEBUGLEVEL 0 diff --git a/build/single_file_libs/zstddeclib-in.c b/build/single_file_libs/zstddeclib-in.c index 72abe61343d..8d9c1f54bb7 100644 --- a/build/single_file_libs/zstddeclib-in.c +++ b/build/single_file_libs/zstddeclib-in.c @@ -4,11 +4,11 @@ * * Generate using: * \code - * combine.sh -r ../../lib -o zstddeclib.c zstddeclib-in.c + * python combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstddeclib.c zstddeclib-in.c * \endcode */ /* - * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,8 +25,12 @@ * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also * defined in mem.h (breaking C99 compatibility). * - * Note: the undefs for xxHash allow Zstd's implementation to coincide with with + * Note: the undefs for xxHash allow Zstd's implementation to coincide with * standalone xxHash usage (with global defines). + * + * Note: if you enable ZSTD_LEGACY_SUPPORT the combine.py script will need + * re-running without the "-x legacy/zstd_legacy.h" option (it excludes the + * legacy support at the source level). */ #define DEBUGLEVEL 0 #define MEM_MODULE diff --git a/contrib/VS2005/zstd/zstd.vcproj b/contrib/VS2005/zstd/zstd.vcproj index 78645d18a36..e37ebee3911 100644 --- a/contrib/VS2005/zstd/zstd.vcproj +++ b/contrib/VS2005/zstd/zstd.vcproj @@ -363,6 +363,10 @@ RelativePath="..\..\..\programs\fileio.c" > + + diff --git a/contrib/diagnose_corruption/Makefile b/contrib/diagnose_corruption/Makefile index a21a0021216..ecc9e63952c 100644 --- a/contrib/diagnose_corruption/Makefile +++ b/contrib/diagnose_corruption/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2019-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/diagnose_corruption/check_flipped_bits.c b/contrib/diagnose_corruption/check_flipped_bits.c index cc40ab84beb..09ddd467476 100644 --- a/contrib/diagnose_corruption/check_flipped_bits.c +++ b/contrib/diagnose_corruption/check_flipped_bits.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/externalSequenceProducer/.gitignore b/contrib/externalSequenceProducer/.gitignore new file mode 100644 index 00000000000..147710aee78 --- /dev/null +++ b/contrib/externalSequenceProducer/.gitignore @@ -0,0 +1,2 @@ +# build artifacts +externalSequenceProducer diff --git a/contrib/externalSequenceProducer/Makefile b/contrib/externalSequenceProducer/Makefile new file mode 100644 index 00000000000..0591ae01ba5 --- /dev/null +++ b/contrib/externalSequenceProducer/Makefile @@ -0,0 +1,40 @@ +# ################################################################ +# Copyright (c) Yann Collet, Meta Platforms, Inc. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# ################################################################ + +PROGDIR = ../../programs +LIBDIR = ../../lib + +LIBZSTD = $(LIBDIR)/libzstd.a + +CPPFLAGS+= -I$(LIBDIR) -I$(LIBDIR)/compress -I$(LIBDIR)/common + +CFLAGS ?= -O3 +CFLAGS += -std=gnu99 +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) + +default: externalSequenceProducer + +all: externalSequenceProducer + +externalSequenceProducer: sequence_producer.c main.c $(LIBZSTD) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +.PHONY: $(LIBZSTD) +$(LIBZSTD): + $(MAKE) -C $(LIBDIR) libzstd.a CFLAGS="$(CFLAGS)" + +clean: + $(RM) *.o + $(MAKE) -C $(LIBDIR) clean > /dev/null + $(RM) externalSequenceProducer diff --git a/contrib/externalSequenceProducer/README.md b/contrib/externalSequenceProducer/README.md new file mode 100644 index 00000000000..c16a170073b --- /dev/null +++ b/contrib/externalSequenceProducer/README.md @@ -0,0 +1,14 @@ +externalSequenceProducer +===================== + +`externalSequenceProducer` is a test tool for the Block-Level Sequence Producer API. +It demonstrates how to use the API to perform a simple round-trip test. + +A sample sequence producer is provided in sequence_producer.c, but the user can swap +this out with a different one if desired. The sample sequence producer implements +LZ parsing with a 1KB hashtable. Dictionary-based parsing is not currently supported. + +Command line : +``` +externalSequenceProducer filename +``` diff --git a/contrib/externalSequenceProducer/main.c b/contrib/externalSequenceProducer/main.c new file mode 100644 index 00000000000..e67e295383c --- /dev/null +++ b/contrib/externalSequenceProducer/main.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" +#include "sequence_producer.h" // simpleSequenceProducer + +#define CHECK(res) \ +do { \ + if (ZSTD_isError(res)) { \ + printf("ERROR: %s\n", ZSTD_getErrorName(res)); \ + return 1; \ + } \ +} while (0) \ + +int main(int argc, char *argv[]) { + if (argc != 2) { + printf("Usage: externalSequenceProducer <file>\n"); + return 1; + } + + ZSTD_CCtx* const zc = ZSTD_createCCtx(); + + int simpleSequenceProducerState = 0xdeadbeef; + + // Here is the crucial bit of code! + ZSTD_registerSequenceProducer( + zc, + &simpleSequenceProducerState, + simpleSequenceProducer + ); + + { + size_t const res = ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, 1); + CHECK(res); + } + + FILE *f = fopen(argv[1], "rb"); + assert(f); + { + int const ret = fseek(f, 0, SEEK_END); + assert(ret == 0); + } + size_t const srcSize = ftell(f); + { + int const ret = fseek(f, 0, SEEK_SET); + assert(ret == 0); + } + + char* const src = malloc(srcSize + 1); + assert(src); + { + size_t const ret = fread(src, srcSize, 1, f); + assert(ret == 1); + int const ret2 = fclose(f); + assert(ret2 == 0); + } + + size_t const dstSize = ZSTD_compressBound(srcSize); + char* const dst = malloc(dstSize); + assert(dst); + + size_t const cSize = ZSTD_compress2(zc, dst, dstSize, src, srcSize); + CHECK(cSize); + + char* const val = malloc(srcSize); + assert(val); + + { + size_t const res = ZSTD_decompress(val, srcSize, dst, cSize); + CHECK(res); + } + + if (memcmp(src, val, srcSize) == 0) { + printf("Compression and decompression were successful!\n"); + printf("Original size: %lu\n", srcSize); + printf("Compressed size: %lu\n", cSize); + } else { + printf("ERROR: input and validation buffers don't match!\n"); + for (size_t i = 0; i < srcSize; i++) { + if (src[i] != val[i]) { + printf("First bad index: %zu\n", i); + break; + } + } + return 1; + } + + ZSTD_freeCCtx(zc); + free(src); + free(dst); + free(val); + return 0; +} diff --git a/contrib/externalSequenceProducer/sequence_producer.c b/contrib/externalSequenceProducer/sequence_producer.c new file mode 100644 index 00000000000..60a2f9572b0 --- /dev/null +++ b/contrib/externalSequenceProducer/sequence_producer.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved.
+ */ + +#include "zstd_compress_internal.h" +#include "sequence_producer.h" + +#define HSIZE 1024 +static U32 const HLOG = 10; +static U32 const MLS = 4; +static U32 const BADIDX = 0xffffffff; + +size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +) { + const BYTE* const istart = (const BYTE*)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + const BYTE* anchor = istart; + size_t seqCount = 0; + U32 hashTable[HSIZE]; + + (void)sequenceProducerState; + (void)dict; + (void)dictSize; + (void)outSeqsCapacity; + (void)compressionLevel; + + { int i; + for (i=0; i < HSIZE; i++) { + hashTable[i] = BADIDX; + } } + + while (ip + MLS < iend) { + size_t const hash = ZSTD_hashPtr(ip, HLOG, MLS); + U32 const matchIndex = hashTable[hash]; + hashTable[hash] = (U32)(ip - istart); + + if (matchIndex != BADIDX) { + const BYTE* const match = istart + matchIndex; + U32 const matchLen = (U32)ZSTD_count(ip, match, iend); + if (matchLen >= ZSTD_MINMATCH_MIN) { + U32 const litLen = (U32)(ip - anchor); + U32 const offset = (U32)(ip - match); + ZSTD_Sequence const seq = { + offset, litLen, matchLen, 0 + }; + + /* Note: it's crucial to stay within the window size! */ + if (offset <= windowSize) { + outSeqs[seqCount++] = seq; + ip += matchLen; + anchor = ip; + continue; + } + } + } + + ip++; + } + + { ZSTD_Sequence const finalSeq = { + 0, (U32)(iend - anchor), 0, 0 + }; + outSeqs[seqCount++] = finalSeq; + } + + return seqCount; +} diff --git a/contrib/externalSequenceProducer/sequence_producer.h b/contrib/externalSequenceProducer/sequence_producer.h new file mode 100644 index 00000000000..19f9982ac36 --- /dev/null +++ b/contrib/externalSequenceProducer/sequence_producer.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef MATCHFINDER_H +#define MATCHFINDER_H + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" + +size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +#endif diff --git a/contrib/freestanding_lib/freestanding.py b/contrib/freestanding_lib/freestanding.py index cd9d6377498..df6983245d6 100755 --- a/contrib/freestanding_lib/freestanding.py +++ b/contrib/freestanding_lib/freestanding.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # ################################################################ -# Copyright (c) 2021-2021, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -340,7 +340,7 @@ def _preprocess_once(self): if macro2 is not None and not resolved: assert ifdef and defined and op == '&&' and cmp is not None - # If the statment is true, but we have a single value check, then + # If the statement is true, but we have a single value check, then # check the value. 
defined_value = self._defs[macro] are_ints = True @@ -431,7 +431,7 @@ def __init__( external_xxhash: bool, xxh64_state: Optional[str], xxh64_prefix: Optional[str], rewritten_includes: [(str, str)], defs: [(str, Optional[str])], replaces: [(str, str)], - undefs: [str], excludes: [str], seds: [str], + undefs: [str], excludes: [str], seds: [str], spdx: bool, ): self._zstd_deps = zstd_deps self._mem = mem @@ -446,6 +446,7 @@ def __init__( self._undefs = undefs self._excludes = excludes self._seds = seds + self._spdx = spdx def _dst_lib_file_paths(self): """ @@ -640,6 +641,27 @@ def _process_seds(self): for sed in self._seds: self._process_sed(sed) + def _process_spdx(self): + if not self._spdx: + return + self._log("Processing spdx") + SPDX_C = "// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause\n" + SPDX_H_S = "/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */\n" + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + if file.lines[0] == SPDX_C or file.lines[0] == SPDX_H_S: + continue + for line in file.lines: + if "SPDX-License-Identifier" in line: + raise RuntimeError(f"Unexpected SPDX license identifier: {file.filename} {repr(line)}") + if file.filename.endswith(".c"): + file.lines.insert(0, SPDX_C) + elif file.filename.endswith(".h") or file.filename.endswith(".S"): + file.lines.insert(0, SPDX_H_S) + else: + raise RuntimeError(f"Unexpected file extension: {file.filename}") + file.write() + def go(self): @@ -651,6 +673,7 @@ def go(self): self._rewrite_includes() self._replace_xxh64_prefix() self._process_seds() + self._process_spdx() def parse_optional_pair(defines: [str]) -> [(str, Optional[str])]: @@ -689,8 +712,9 @@ def main(name, args): parser.add_argument("--xxh64-prefix", default=None, help="Alternate XXH64 function prefix (excluding _) e.g. --xxh64-prefix=xxh64") parser.add_argument("--rewrite-include", default=[], dest="rewritten_includes", action="append", help="Rewrite an include REGEX=NEW (e.g. '=')") parser.add_argument("--sed", default=[], dest="seds", action="append", help="Apply a sed replacement. Format: `s/REGEX/FORMAT/[g]`. REGEX is a Python regex. FORMAT is a Python format string formatted by the regex dict.") + parser.add_argument("--spdx", action="store_true", help="Add SPDX License Identifiers") parser.add_argument("-D", "--define", default=[], dest="defs", action="append", help="Pre-define this macro (can be passed multiple times)") - parser.add_argument("-U", "--undefine", default=[], dest="undefs", action="append", help="Pre-undefine this macro (can be passed mutliple times)") + parser.add_argument("-U", "--undefine", default=[], dest="undefs", action="append", help="Pre-undefine this macro (can be passed multiple times)") parser.add_argument("-R", "--replace", default=[], dest="replaces", action="append", help="Pre-define this macro and replace the first ifndef block with its definition") parser.add_argument("-E", "--exclude", default=[], dest="excludes", action="append", help="Exclude all lines between 'BEGIN ' and 'END '") args = parser.parse_args(args) @@ -743,6 +767,7 @@ def main(name, args): args.undefs, args.excludes, args.seds, + args.spdx, ).go() if __name__ == "__main__": diff --git a/contrib/gen_html/Makefile b/contrib/gen_html/Makefile index 425f266c4e4..26e870399aa 100644 --- a/contrib/gen_html/Makefile +++ b/contrib/gen_html/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/gen_html/gen_html.cpp b/contrib/gen_html/gen_html.cpp index 90d5b21a3aa..1da5879adae 100644 --- a/contrib/gen_html/gen_html.cpp +++ b/contrib/gen_html/gen_html.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/largeNbDicts/Makefile b/contrib/largeNbDicts/Makefile index 4c055b0ed3f..40734e62e47 100644 --- a/contrib/largeNbDicts/Makefile +++ b/contrib/largeNbDicts/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2018-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/largeNbDicts/README.md b/contrib/largeNbDicts/README.md index f29bcdfe8e3..010102c904f 100644 --- a/contrib/largeNbDicts/README.md +++ b/contrib/largeNbDicts/README.md @@ -14,12 +14,20 @@ Command line : ``` largeNbDicts [Options] filename(s) -Options : --r : recursively load all files in subdirectories (default: off) --B# : split input into blocks of size # (default: no split) --# : use compression level # (default: 3) --D # : use # as a dictionary (default: create one) --i# : nb benchmark rounds (default: 6) ---nbDicts=# : set nb of dictionaries to # (default: one per block) --h : help (this text) +Options : +-z : benchmark compression (default) +-d : benchmark decompression +-r : recursively load all files in subdirectories (default: off) +-B# : split input into blocks of size # (default: no split) +-# : use compression level # (default: 3) +-D # : use # as a dictionary (default: create one) +-i# : nb benchmark rounds (default: 6) +--nbBlocks=#: use # blocks for bench (default: one per file) +--nbDicts=# : create # dictionaries for bench (default: one per block) +-h : help (this text) + +Advanced Options (see zstd.h for documentation) : +--dedicated-dict-search +--dict-content-type=# +--dict-attach-pref=# ``` diff --git a/contrib/largeNbDicts/largeNbDicts.c b/contrib/largeNbDicts/largeNbDicts.c index ddbb3e081be..eeaaf7182e5 100644 --- a/contrib/largeNbDicts/largeNbDicts.c +++ b/contrib/largeNbDicts/largeNbDicts.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ /*--- Dependencies ---*/ #include /* size_t */ -#include /* malloc, free, abort */ +#include /* malloc, free, abort, qsort*/ #include /* fprintf */ #include /* UINT_MAX */ #include /* assert */ @@ -39,6 +39,7 @@ #define BLOCKSIZE_DEFAULT 0 /* no slicing into blocks */ #define DICTSIZE (4 KB) #define CLEVEL_DEFAULT 3 +#define DICT_LOAD_METHOD ZSTD_dlm_byCopy #define BENCH_TIME_DEFAULT_S 6 #define RUN_TIME_DEFAULT_MS 1000 @@ -156,19 +157,6 @@ createDictionaryBuffer(const char* dictionaryName, } } -static ZSTD_CDict* createCDictForDedicatedDictSearch(const void* dict, size_t dictSize, int compressionLevel) -{ - ZSTD_CCtx_params* params = ZSTD_createCCtxParams(); - ZSTD_CCtxParams_init(params, compressionLevel); - ZSTD_CCtxParams_setParameter(params, ZSTD_c_enableDedicatedDictSearch, 1); - ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, compressionLevel); - - ZSTD_CDict* cdict = ZSTD_createCDict_advanced2(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, params, ZSTD_defaultCMem); - - ZSTD_freeCCtxParams(params); - return cdict; -} - /*! BMK_loadFiles() : * Loads `buffer`, with content from files listed within `fileNamesTable`. * Fills `buffer` entirely. @@ -461,14 +449,12 @@ static void freeCDictCollection(cdict_collection_t cdictc) } /* returns .buffers=NULL if operation fails */ -static cdict_collection_t createCDictCollection(const void* dictBuffer, size_t dictSize, size_t nbCDict, int cLevel, int dedicatedDictSearch) +static cdict_collection_t createCDictCollection(const void* dictBuffer, size_t dictSize, size_t nbCDict, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params* cctxParams) { ZSTD_CDict** const cdicts = malloc(nbCDict * sizeof(ZSTD_CDict*)); if (cdicts==NULL) return kNullCDictCollection; for (size_t dictNb=0; dictNb < nbCDict; dictNb++) { - cdicts[dictNb] = dedicatedDictSearch ? 
- createCDictForDedicatedDictSearch(dictBuffer, dictSize, cLevel) : - ZSTD_createCDict(dictBuffer, dictSize, cLevel); + cdicts[dictNb] = ZSTD_createCDict_advanced2(dictBuffer, dictSize, DICT_LOAD_METHOD, dictContentType, cctxParams, ZSTD_defaultCMem); CONTROL(cdicts[dictNb] != NULL); } cdict_collection_t cdictc; @@ -591,11 +577,13 @@ typedef struct { cdict_collection_t dictionaries; } compressInstructions; -compressInstructions createCompressInstructions(cdict_collection_t dictionaries) +compressInstructions createCompressInstructions(cdict_collection_t dictionaries, ZSTD_CCtx_params* cctxParams) { compressInstructions ci; ci.cctx = ZSTD_createCCtx(); CONTROL(ci.cctx != NULL); + if (cctxParams) + ZSTD_CCtx_setParametersUsingCCtxParams(ci.cctx, cctxParams); ci.nbDicts = dictionaries.nbCDict; ci.dictNb = 0; ci.dictionaries = dictionaries; @@ -636,10 +624,10 @@ size_t compress(const void* src, size_t srcSize, void* dst, size_t dstCapacity, compressInstructions* const ci = (compressInstructions*) payload; (void)dstCapacity; - ZSTD_compress_usingCDict(ci->cctx, - dst, srcSize, - src, srcSize, - ci->dictionaries.cdicts[ci->dictNb]); + ZSTD_CCtx_refCDict(ci->cctx, ci->dictionaries.cdicts[ci->dictNb]); + ZSTD_compress2(ci->cctx, + dst, srcSize, + src, srcSize); ci->dictNb = ci->dictNb + 1; if (ci->dictNb >= ci->nbDicts) ci->dictNb = 0; @@ -663,25 +651,55 @@ size_t decompress(const void* src, size_t srcSize, void* dst, size_t dstCapacity return result; } +typedef enum { + fastest = 0, + median = 1, +} metricAggregatePref_e; -static int benchMem(slice_collection_t dstBlocks, - slice_collection_t srcBlocks, +/* compareFunction() : + * Sort input in decreasing order when used with qsort() */ +int compareFunction(const void *a, const void *b) +{ + double x = *(const double *)a; + double y = *(const double *)b; + if (x < y) + return 1; + else if (x > y) + return -1; + return 0; +} + +double aggregateData(double *data, size_t size, + metricAggregatePref_e metricAggregatePref) +{ + qsort(data, size, sizeof(*data), compareFunction); + if (metricAggregatePref == fastest) + return data[0]; + else /* median */ + return (data[(size - 1) / 2] + data[size / 2]) / 2; +} + +static int benchMem(slice_collection_t dstBlocks, slice_collection_t srcBlocks, ddict_collection_t ddictionaries, - cdict_collection_t cdictionaries, - unsigned nbRounds, int benchCompression) + cdict_collection_t cdictionaries, unsigned nbRounds, + int benchCompression, const char *exeName, + ZSTD_CCtx_params *cctxParams, + metricAggregatePref_e metricAggregatePref) { assert(dstBlocks.nbSlices == srcBlocks.nbSlices); + if (benchCompression) assert(cctxParams); unsigned const ms_per_round = RUN_TIME_DEFAULT_MS; unsigned const total_time_ms = nbRounds * ms_per_round; - double bestSpeed = 0.; + double *const speedPerRound = (double *)malloc(nbRounds * sizeof(double)); BMK_timedFnState_t* const benchState = BMK_createTimedFnState(total_time_ms, ms_per_round); decompressInstructions di = createDecompressInstructions(ddictionaries); - compressInstructions ci = createCompressInstructions(cdictionaries); + compressInstructions ci = + createCompressInstructions(cdictionaries, cctxParams); void* payload = benchCompression ? (void*)&ci : (void*)&di; BMK_benchParams_t const bp = { .benchFn = benchCompression ? 
compress : decompress, @@ -697,6 +715,7 @@ static int benchMem(slice_collection_t dstBlocks, .blockResults = NULL }; + size_t roundNb = 0; for (;;) { BMK_runOutcome_t const outcome = BMK_benchTimedFn(benchState, bp); CONTROL(BMK_isSuccessful_runOutcome(outcome)); @@ -706,16 +725,57 @@ static int benchMem(slice_collection_t dstBlocks, double const dTime_sec = (double)dTime_ns / 1000000000; size_t const srcSize = result.sumOfReturn; double const speed_MBps = (double)srcSize / dTime_sec / (1 MB); - if (speed_MBps > bestSpeed) bestSpeed = speed_MBps; + speedPerRound[roundNb] = speed_MBps; if (benchCompression) - DISPLAY("Compression Speed : %.1f MB/s \r", bestSpeed); + DISPLAY("Compression Speed : %.1f MB/s \r", speed_MBps); else - DISPLAY("Decompression Speed : %.1f MB/s \r", bestSpeed); + DISPLAY("Decompression Speed : %.1f MB/s \r", speed_MBps); fflush(stdout); if (BMK_isCompleted_TimedFn(benchState)) break; + roundNb++; } DISPLAY("\n"); + /* BMK_benchTimedFn may not run exactly nbRounds iterations */ + double speedAggregated = + aggregateData(speedPerRound, roundNb + 1, metricAggregatePref); + if (metricAggregatePref == fastest) + DISPLAY("Fastest Speed : %.1f MB/s \n", speedAggregated); + else + DISPLAY("Median Speed : %.1f MB/s \n", speedAggregated); + + char* csvFileName = malloc(strlen(exeName) + 5); + strcpy(csvFileName, exeName); + strcat(csvFileName, ".csv"); + FILE* csvFile = fopen(csvFileName, "r"); + if (!csvFile) { + csvFile = fopen(csvFileName, "wt"); + assert(csvFile); + fprintf(csvFile, "%s\n", exeName); + /* Print table headers */ + fprintf( + csvFile, + "Compression/Decompression,Level,nbDicts,dictAttachPref,metricAggregatePref,Speed\n"); + } else { + fclose(csvFile); + csvFile = fopen(csvFileName, "at"); + assert(csvFile); + } + + int cLevel = -1; + int dictAttachPref = -1; + if (benchCompression) { + ZSTD_CCtxParams_getParameter(cctxParams, ZSTD_c_compressionLevel, + &cLevel); + ZSTD_CCtxParams_getParameter(cctxParams, ZSTD_c_forceAttachDict, + &dictAttachPref); + } + fprintf(csvFile, "%s,%d,%ld,%d,%d,%.1f\n", + benchCompression ? "Compression" : "Decompression", cLevel, + benchCompression ? ci.nbDicts : di.nbDicts, dictAttachPref, + metricAggregatePref, speedAggregated); + fclose(csvFile); + free(csvFileName); freeDecompressInstructions(di); freeCompressInstructions(ci); @@ -730,12 +790,11 @@ static int benchMem(slice_collection_t dstBlocks, * dictionary : optional (can be NULL), file to load as dictionary, * if none provided : will be calculated on the fly by the program. * @return : 0 is success, 1+ otherwise */ -int bench(const char** fileNameTable, unsigned nbFiles, - const char* dictionary, - size_t blockSize, int clevel, - unsigned nbDictMax, unsigned nbBlocks, +int bench(const char **fileNameTable, unsigned nbFiles, const char *dictionary, + size_t blockSize, int clevel, unsigned nbDictMax, unsigned nbBlocks, unsigned nbRounds, int benchCompression, - int dedicatedDictSearch) + ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params *cctxParams, + const char *exeName, metricAggregatePref_e metricAggregatePref) { int result = 0; @@ -786,20 +845,18 @@ int bench(const char** fileNameTable, unsigned nbFiles, /* dictionary determination */ buffer_t const dictBuffer = createDictionaryBuffer(dictionary, srcs.buffer.ptr, - srcs.slices.capacities, srcs.slices.nbSlices, + srcSlices.capacities, srcSlices.nbSlices, DICTSIZE); CONTROL(dictBuffer.ptr != NULL); - ZSTD_CDict* const cdict = dedicatedDictSearch ? 
- createCDictForDedicatedDictSearch(dictBuffer.ptr, dictBuffer.size, clevel) : - ZSTD_createCDict(dictBuffer.ptr, dictBuffer.size, clevel); + ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(dictBuffer.ptr, dictBuffer.size, DICT_LOAD_METHOD, dictContentType, cctxParams, ZSTD_defaultCMem); CONTROL(cdict != NULL); size_t const cTotalSizeNoDict = compressBlocks(NULL, dstSlices, srcSlices, NULL, clevel); CONTROL(cTotalSizeNoDict != 0); DISPLAYLEVEL(3, "compressing at level %u without dictionary : Ratio=%.2f (%u bytes) \n", clevel, - (double)totalSrcSlicesSize / cTotalSizeNoDict, (unsigned)cTotalSizeNoDict); + (double)totalSrcSlicesSize / (double)cTotalSizeNoDict, (unsigned)cTotalSizeNoDict); size_t* const cSizes = malloc(nbBlocks * sizeof(size_t)); CONTROL(cSizes != NULL); @@ -808,21 +865,21 @@ int bench(const char** fileNameTable, unsigned nbFiles, CONTROL(cTotalSize != 0); DISPLAYLEVEL(3, "compressed using a %u bytes dictionary : Ratio=%.2f (%u bytes) \n", (unsigned)dictBuffer.size, - (double)totalSrcSlicesSize / cTotalSize, (unsigned)cTotalSize); + (double)totalSrcSlicesSize / (double)cTotalSize, (unsigned)cTotalSize); /* now dstSlices contain the real compressed size of each block, instead of the maximum capacity */ shrinkSizes(dstSlices, cSizes); unsigned const nbDicts = nbDictMax ? nbDictMax : nbBlocks; - cdict_collection_t const cdictionaries = createCDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts, clevel, dedicatedDictSearch); + cdict_collection_t const cdictionaries = createCDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts, dictContentType, cctxParams); CONTROL(cdictionaries.cdicts != NULL); ddict_collection_t const ddictionaries = createDDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts); CONTROL(ddictionaries.ddicts != NULL); if (benchCompression) { - size_t const dictMem = ZSTD_estimateCDictSize(dictBuffer.size, ZSTD_dlm_byCopy); + size_t const dictMem = ZSTD_sizeof_CDict(cdictionaries.cdicts[0]); size_t const allDictMem = dictMem * nbDicts; DISPLAYLEVEL(3, "generating %u dictionaries, using %.1f MB of memory \n", nbDicts, (double)allDictMem / (1 MB)); @@ -832,11 +889,13 @@ int bench(const char** fileNameTable, unsigned nbFiles, buffer_collection_t resultCollection = createBufferCollection_fromSliceCollection(srcSlices); CONTROL(resultCollection.buffer.ptr != NULL); - result = benchMem(dstSlices, resultCollection.slices, ddictionaries, cdictionaries, nbRounds, benchCompression); + result = benchMem(dstSlices, resultCollection.slices, ddictionaries, + cdictionaries, nbRounds, benchCompression, exeName, + cctxParams, metricAggregatePref); freeBufferCollection(resultCollection); } else { - size_t const dictMem = ZSTD_estimateDDictSize(dictBuffer.size, ZSTD_dlm_byCopy); + size_t const dictMem = ZSTD_estimateDDictSize(dictBuffer.size, DICT_LOAD_METHOD); size_t const allDictMem = dictMem * nbDicts; DISPLAYLEVEL(3, "generating %u dictionaries, using %.1f MB of memory \n", nbDicts, (double)allDictMem / (1 MB)); @@ -846,7 +905,9 @@ int bench(const char** fileNameTable, unsigned nbFiles, buffer_collection_t resultCollection = createBufferCollection_fromSliceCollectionSizes(srcSlices); CONTROL(resultCollection.buffer.ptr != NULL); - result = benchMem(resultCollection.slices, dstSlices, ddictionaries, cdictionaries, nbRounds, benchCompression); + result = benchMem(resultCollection.slices, dstSlices, ddictionaries, + cdictionaries, nbRounds, benchCompression, exeName, + NULL, metricAggregatePref); freeBufferCollection(resultCollection); } @@ -924,9 +985,15 @@ int 
usage(const char* exeName) DISPLAY ("-# : use compression level # (default: %u) \n", CLEVEL_DEFAULT); DISPLAY ("-D # : use # as a dictionary (default: create one) \n"); DISPLAY ("-i# : nb benchmark rounds (default: %u) \n", BENCH_TIME_DEFAULT_S); + DISPLAY ("-p# : print speed for all rounds 0=fastest 1=median (default: 0) \n"); DISPLAY ("--nbBlocks=#: use # blocks for bench (default: one per file) \n"); DISPLAY ("--nbDicts=# : create # dictionaries for bench (default: one per block) \n"); DISPLAY ("-h : help (this text) \n"); + DISPLAY (" \n"); + DISPLAY ("Advanced Options (see zstd.h for documentation) : \n"); + DISPLAY ("--dedicated-dict-search\n"); + DISPLAY ("--dict-content-type=#\n"); + DISPLAY ("--dict-attach-pref=#\n"); return 0; } @@ -956,6 +1023,10 @@ int main (int argc, const char** argv) size_t blockSize = BLOCKSIZE_DEFAULT; unsigned nbDicts = 0; /* determine nbDicts automatically: 1 dictionary per block */ unsigned nbBlocks = 0; /* determine nbBlocks automatically, from source and blockSize */ + ZSTD_dictContentType_e dictContentType = ZSTD_dct_auto; + ZSTD_dictAttachPref_e dictAttachPref = ZSTD_dictDefaultAttach; + ZSTD_paramSwitch_e prefetchCDictTables = ZSTD_ps_auto; + metricAggregatePref_e metricAggregatePref = fastest; for (int argNb = 1; argNb < argc ; argNb++) { const char* argument = argv[argNb]; @@ -965,6 +1036,7 @@ int main (int argc, const char** argv) if (!strcmp(argument, "-r")) { recursiveMode = 1; continue; } if (!strcmp(argument, "-D")) { argNb++; assert(argNb < argc); dictionary = argv[argNb]; continue; } if (longCommandWArg(&argument, "-i")) { nbRounds = readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "-p")) { metricAggregatePref = (int)readU32FromChar(&argument); continue;} if (longCommandWArg(&argument, "--dictionary=")) { dictionary = argument; continue; } if (longCommandWArg(&argument, "-B")) { blockSize = readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "--blockSize=")) { blockSize = readU32FromChar(&argument); continue; } @@ -972,6 +1044,9 @@ int main (int argc, const char** argv) if (longCommandWArg(&argument, "--nbBlocks=")) { nbBlocks = readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "--clevel=")) { cLevel = (int)readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "--dedicated-dict-search")) { dedicatedDictSearch = 1; continue; } + if (longCommandWArg(&argument, "--dict-content-type=")) { dictContentType = (int)readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "--dict-attach-pref=")) { dictAttachPref = (int)readU32FromChar(&argument); continue; } + if (longCommandWArg(&argument, "--prefetch-cdict-tables=")) { prefetchCDictTables = (int)readU32FromChar(&argument); continue; } if (longCommandWArg(&argument, "-")) { cLevel = (int)readU32FromChar(&argument); continue; } /* anything that's not a command is a filename */ nameTable[nameIdx++] = argument; @@ -989,10 +1064,22 @@ int main (int argc, const char** argv) nameTable = NULL; /* UTIL_createFileNamesTable() takes ownership of nameTable */ } - int result = bench(filenameTable->fileNames, (unsigned)filenameTable->tableSize, dictionary, blockSize, cLevel, nbDicts, nbBlocks, nbRounds, benchCompression, dedicatedDictSearch); + ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams(); + ZSTD_CCtxParams_init(cctxParams, cLevel); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_enableDedicatedDictSearch, dedicatedDictSearch); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_nbWorkers, 0); + 
ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_forceAttachDict, dictAttachPref); + ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_prefetchCDictTables, prefetchCDictTables); + + int result = + bench(filenameTable->fileNames, (unsigned)filenameTable->tableSize, + dictionary, blockSize, cLevel, nbDicts, nbBlocks, nbRounds, + benchCompression, dictContentType, cctxParams, exeName, + metricAggregatePref); UTIL_freeFileNamesTable(filenameTable); free(nameTable); + ZSTD_freeCCtxParams(cctxParams); return result; } diff --git a/contrib/linux-kernel/Makefile b/contrib/linux-kernel/Makefile index 47a4317404b..63dd15d958f 100644 --- a/contrib/linux-kernel/Makefile +++ b/contrib/linux-kernel/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -26,14 +26,13 @@ libzstd: --rewrite-include '"(\.\./)?zstd_errors.h"=' \ --sed 's,/\*\*\*,/* *,g' \ --sed 's,/\*\*,/*,g' \ + --spdx \ -DZSTD_NO_INTRINSICS \ -DZSTD_NO_UNUSED_FUNCTIONS \ -DZSTD_LEGACY_SUPPORT=0 \ -DZSTD_STATIC_LINKING_ONLY \ -DFSE_STATIC_LINKING_ONLY \ - -DHUF_STATIC_LINKING_ONLY \ -DXXH_STATIC_LINKING_ONLY \ - -DMEM_FORCE_MEMORY_ACCESS=0 \ -D__GNUC__ \ -D__linux__=1 \ -DSTATIC_BMI2=0 \ @@ -49,16 +48,19 @@ libzstd: -UZSTD_MULTITHREAD \ -U_MSC_VER \ -U_WIN32 \ - -RZSTDLIB_VISIBILITY= \ - -RZSTDERRORLIB_VISIBILITY= \ + -RZSTDLIB_VISIBLE= \ + -RZSTDERRORLIB_VISIBLE= \ -RZSTD_FALLTHROUGH=fallthrough \ -DZSTD_HAVE_WEAK_SYMBOLS=0 \ -DZSTD_TRACE=0 \ -DZSTD_NO_TRACE \ + -DZSTD_DISABLE_ASM \ -DZSTD_LINUX_KERNEL + rm linux/lib/zstd/decompress/huf_decompress_amd64.S mv linux/lib/zstd/zstd.h linux/include/linux/zstd_lib.h mv linux/lib/zstd/zstd_errors.h linux/include/linux/ cp linux_zstd.h linux/include/linux/zstd.h + cp zstd_common_module.c linux/lib/zstd cp zstd_compress_module.c linux/lib/zstd cp zstd_decompress_module.c linux/lib/zstd cp decompress_sources.h linux/lib/zstd @@ -102,4 +104,5 @@ test: libzstd .PHONY: clean clean: - $(RM) -rf linux test/test test/static_test + $(RM) -rf linux + $(MAKE) -C test clean diff --git a/contrib/linux-kernel/decompress_sources.h b/contrib/linux-kernel/decompress_sources.h index a06ca187aab..8a47eb2a451 100644 --- a/contrib/linux-kernel/decompress_sources.h +++ b/contrib/linux-kernel/decompress_sources.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/linux.mk b/contrib/linux-kernel/linux.mk index f6f3a8983d8..464c410b276 100644 --- a/contrib/linux-kernel/linux.mk +++ b/contrib/linux-kernel/linux.mk @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -10,16 +10,10 @@ # ################################################################ obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o - -ccflags-y += -Wno-error=deprecated-declarations +obj-$(CONFIG_ZSTD_COMMON) += zstd_common.o zstd_compress-y := \ zstd_compress_module.o \ - common/debug.o \ - common/entropy_common.o \ - common/error_private.o \ - common/fse_decompress.o \ - common/zstd_common.o \ compress/fse_compress.o \ compress/hist.o \ compress/huf_compress.o \ @@ -35,13 +29,15 @@ zstd_compress-y := \ zstd_decompress-y := \ zstd_decompress_module.o \ + decompress/huf_decompress.o \ + decompress/zstd_ddict.o \ + decompress/zstd_decompress.o \ + decompress/zstd_decompress_block.o \ + +zstd_common-y := \ + zstd_common_module.o \ common/debug.o \ common/entropy_common.o \ common/error_private.o \ common/fse_decompress.o \ common/zstd_common.o \ - decompress/huf_decompress.o \ - decompress/huf_decompress_amd64.o \ - decompress/zstd_ddict.o \ - decompress/zstd_decompress.o \ - decompress/zstd_decompress_block.o \ diff --git a/contrib/linux-kernel/linux_zstd.h b/contrib/linux-kernel/linux_zstd.h index 113408eef6e..f109d49f43f 100644 --- a/contrib/linux-kernel/linux_zstd.h +++ b/contrib/linux-kernel/linux_zstd.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/mem.h b/contrib/linux-kernel/mem.h index 1d9cc03924c..a7231822b6e 100644 --- a/contrib/linux-kernel/mem.h +++ b/contrib/linux-kernel/mem.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/Makefile b/contrib/linux-kernel/test/Makefile index be82b3fbac8..67b55e66504 100644 --- a/contrib/linux-kernel/test/Makefile +++ b/contrib/linux-kernel/test/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -45,4 +45,5 @@ clean: $(RM) -f $(LINUX_ZSTDLIB)/*.o $(RM) -f $(LINUX_ZSTDLIB)/**/*.o $(RM) -f *.o *.a + $(RM) -f static_test $(RM) -f test diff --git a/contrib/linux-kernel/test/include/linux/compiler.h b/contrib/linux-kernel/test/include/linux/compiler.h index de43edb695c..988ce4a20ff 100644 --- a/contrib/linux-kernel/test/include/linux/compiler.h +++ b/contrib/linux-kernel/test/include/linux/compiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/errno.h b/contrib/linux-kernel/test/include/linux/errno.h index b2475225723..b4bdcba0ea6 100644 --- a/contrib/linux-kernel/test/include/linux/errno.h +++ b/contrib/linux-kernel/test/include/linux/errno.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. 
+ * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/kernel.h b/contrib/linux-kernel/test/include/linux/kernel.h index 1f702abac55..a4d791cd1d5 100644 --- a/contrib/linux-kernel/test/include/linux/kernel.h +++ b/contrib/linux-kernel/test/include/linux/kernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/limits.h b/contrib/linux-kernel/test/include/linux/limits.h index db9c0990488..574aa7b343f 100644 --- a/contrib/linux-kernel/test/include/linux/limits.h +++ b/contrib/linux-kernel/test/include/linux/limits.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/math64.h b/contrib/linux-kernel/test/include/linux/math64.h index 8eefa2d5c9d..7f6713e73bc 100644 --- a/contrib/linux-kernel/test/include/linux/math64.h +++ b/contrib/linux-kernel/test/include/linux/math64.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/module.h b/contrib/linux-kernel/test/include/linux/module.h index be6d20daea2..06ef56f9ebe 100644 --- a/contrib/linux-kernel/test/include/linux/module.h +++ b/contrib/linux-kernel/test/include/linux/module.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,6 +12,8 @@ #define EXPORT_SYMBOL(symbol) \ void* __##symbol = symbol +#define EXPORT_SYMBOL_GPL(symbol) \ + void* __##symbol = symbol #define MODULE_LICENSE(license) #define MODULE_DESCRIPTION(description) diff --git a/contrib/linux-kernel/test/include/linux/printk.h b/contrib/linux-kernel/test/include/linux/printk.h index eab08e0c4cc..92a25278e71 100644 --- a/contrib/linux-kernel/test/include/linux/printk.h +++ b/contrib/linux-kernel/test/include/linux/printk.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/stddef.h b/contrib/linux-kernel/test/include/linux/stddef.h index 8538eb3e40c..15c7408fcdf 100644 --- a/contrib/linux-kernel/test/include/linux/stddef.h +++ b/contrib/linux-kernel/test/include/linux/stddef.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/swab.h b/contrib/linux-kernel/test/include/linux/swab.h index 783046b42d1..2b48b434c97 100644 --- a/contrib/linux-kernel/test/include/linux/swab.h +++ b/contrib/linux-kernel/test/include/linux/swab.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/types.h b/contrib/linux-kernel/test/include/linux/types.h index 459a4570060..b413db6f983 100644 --- a/contrib/linux-kernel/test/include/linux/types.h +++ b/contrib/linux-kernel/test/include/linux/types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/xxhash.h b/contrib/linux-kernel/test/include/linux/xxhash.h index 7e92a706e7e..d41cbd93318 100644 --- a/contrib/linux-kernel/test/include/linux/xxhash.h +++ b/contrib/linux-kernel/test/include/linux/xxhash.h @@ -2,7 +2,7 @@ * xxHash - Extremely Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet. * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are @@ -260,7 +260,7 @@ XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state * xxHash - Extremely Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet. * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are diff --git a/contrib/linux-kernel/test/static_test.c b/contrib/linux-kernel/test/static_test.c index d2b8b5a322c..ba4a420d417 100644 --- a/contrib/linux-kernel/test/static_test.c +++ b/contrib/linux-kernel/test/static_test.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/test.c b/contrib/linux-kernel/test/test.c index 6cd1730bb3a..0f4ba3f45ef 100644 --- a/contrib/linux-kernel/test/test.c +++ b/contrib/linux-kernel/test/test.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -186,11 +186,14 @@ static void __attribute__((noinline)) use(void *x) { asm volatile("" : "+r"(x)); } +static void __attribute__((noinline)) fill_stack(void) { + memset(g_stack, 0x33, 8192); +} + static void __attribute__((noinline)) set_stack(void) { char stack[8192]; g_stack = stack; - memset(g_stack, 0x33, 8192); use(g_stack); } @@ -208,6 +211,7 @@ static void __attribute__((noinline)) check_stack(void) { static void test_stack_usage(test_data_t const *data) { set_stack(); + fill_stack(); test_f2fs(); test_btrfs(data); test_decompress_unzstd(data); diff --git a/contrib/linux-kernel/zstd_common_module.c b/contrib/linux-kernel/zstd_common_module.c new file mode 100644 index 00000000000..2fead39eb74 --- /dev/null +++ b/contrib/linux-kernel/zstd_common_module.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include + +#include "common/huf.h" +#include "common/fse.h" +#include "common/zstd_internal.h" + +// Export symbols shared by compress and decompress into a common module + +#undef ZSTD_isError /* defined within zstd_internal.h */ +EXPORT_SYMBOL_GPL(FSE_readNCount); +EXPORT_SYMBOL_GPL(HUF_readStats); +EXPORT_SYMBOL_GPL(HUF_readStats_wksp); +EXPORT_SYMBOL_GPL(ZSTD_isError); +EXPORT_SYMBOL_GPL(ZSTD_getErrorName); +EXPORT_SYMBOL_GPL(ZSTD_getErrorCode); +EXPORT_SYMBOL_GPL(ZSTD_customMalloc); +EXPORT_SYMBOL_GPL(ZSTD_customCalloc); +EXPORT_SYMBOL_GPL(ZSTD_customFree); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Zstd Common"); diff --git a/contrib/linux-kernel/zstd_compress_module.c b/contrib/linux-kernel/zstd_compress_module.c index 65548a4bb93..8ecf43226af 100644 --- a/contrib/linux-kernel/zstd_compress_module.c +++ b/contrib/linux-kernel/zstd_compress_module.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -133,7 +133,11 @@ EXPORT_SYMBOL(zstd_init_cstream); size_t zstd_reset_cstream(zstd_cstream *cstream, unsigned long long pledged_src_size) { - return ZSTD_resetCStream(cstream, pledged_src_size); + if (pledged_src_size == 0) + pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN; + ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only) ); + ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_setPledgedSrcSize(cstream, pledged_src_size) ); + return 0; } EXPORT_SYMBOL(zstd_reset_cstream); diff --git a/contrib/linux-kernel/zstd_decompress_module.c b/contrib/linux-kernel/zstd_decompress_module.c index f4ed952ed48..eb1c49e6972 100644 --- a/contrib/linux-kernel/zstd_decompress_module.c +++ b/contrib/linux-kernel/zstd_decompress_module.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/zstd_deps.h b/contrib/linux-kernel/zstd_deps.h index 7a5bf44839c..92516141603 100644 --- a/contrib/linux-kernel/zstd_deps.h +++ b/contrib/linux-kernel/zstd_deps.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/match_finders/README.md b/contrib/match_finders/README.md index 0f4a3b1ce82..54055c37086 100644 --- a/contrib/match_finders/README.md +++ b/contrib/match_finders/README.md @@ -14,7 +14,7 @@ * files would be small relative to the size of the file. * * Various 'diffing' algorithms utilize this notion of edit distance and - * the corrensponding concept of a minimal edit script between two + * the corresponding concept of a minimal edit script between two * sequences to identify the regions within two files where they differ. * The core algorithm used in this match finder is described in: * @@ -28,12 +28,12 @@ * * Note: after some experimentation, this approach proved to not provide enough * utility to justify the additional CPU used in finding matches. The one area - * where this approach consistenly outperforms Zstandard even on level 19 is - * when compressing small files (<10 KB) using a equally small dictionary that + * where this approach consistently outperforms Zstandard even on level 19 is + * when compressing small files (<10 KB) using an equally small dictionary that * is very similar to the source file. For the use case that this was intended, * (large similar files) this approach by itself took 5-10X longer than zstd-19 and * generally resulted in 2-3X larger files. The core advantage that zstd-19 has - * over this appraoch for match finding is the overlapping matches. This approach + * over this approach for match finding is the overlapping matches. This approach * cannot find any. * * I'm leaving this in the contrib section in case this ever becomes interesting diff --git a/contrib/match_finders/zstd_edist.c b/contrib/match_finders/zstd_edist.c index aab545fd3ba..d685cdd9e2f 100644 --- a/contrib/match_finders/zstd_edist.c +++ b/contrib/match_finders/zstd_edist.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,7 @@ * Dependencies ***************************************/ -/* Currently relies on qsort when combining contiguous matches. This can probabily +/* Currently relies on qsort when combining contiguous matches. This can probably * be avoided but would require changes to the algorithm. The qsort is far from * the bottleneck in this algorithm even for medium sized files so it's probably * not worth trying to address */ @@ -26,7 +26,7 @@ * Constants ***************************************/ -/* Just a sential for the entires of the diagnomal matrix */ +/* Just a sential for the entries of the diagonal matrix */ #define ZSTD_EDIST_DIAG_MAX (S32)(1 << 30) /* How large should a snake be to be considered a 'big' snake. 
@@ -39,7 +39,7 @@ #define ZSTD_EDIST_SNAKE_ITER_THRESH 200 /* After how many iterations should be just give up and take - * the best availabe edit script for this round */ + * the best available edit script for this round */ #define ZSTD_EDIST_EXPENSIVE_THRESH 1024 /*-************************************* @@ -57,10 +57,10 @@ typedef struct { const BYTE* src; size_t dictSize; size_t srcSize; - S32* forwardDiag; /* Entires of the forward diagonal stored here */ - S32* backwardDiag; /* Entires of the backward diagonal stored here. + S32* forwardDiag; /* Entries of the forward diagonal stored here */ + S32* backwardDiag; /* Entries of the backward diagonal stored here. * Note: this buffer and the 'forwardDiag' buffer - * are contiguous. See the ZSTD_eDist_genSequences */ + * are contiguous. See the ZSTD_eDist_genSequences */ ZSTD_eDist_match* matches; /* Accumulate matches of length 1 in this buffer. * In a subsequence post-processing step, we combine * contiguous matches. */ @@ -192,7 +192,7 @@ static void ZSTD_eDist_diag(ZSTD_eDist_state* state, if (!useHeuristics) continue; - /* Everything under this point is a heuritic. Using these will + /* Everything under this point is a heuristic. Using these will * substantially speed up the match finding. In some cases, taking * the total match finding time from several minutes to seconds. * Of course, the caveat is that the edit script found may no longer @@ -366,8 +366,8 @@ static int ZSTD_eDist_compare(ZSTD_eDist_state* state, } } else if (srcLow == srcHigh) { while (dictLow < dictHigh) { - /* Reaching this point means deleteing dict[dictLow] from - * the current positino of dict */ + /* Reaching this point means deleting dict[dictLow] from + * the current position of dict */ dictLow++; } } else { @@ -395,7 +395,7 @@ static int ZSTD_eDist_matchComp(const void* p, const void* q) } /* The matches from the approach above will all be of the form - * (dictIdx, srcIdx, 1). this method combines contiguous matches + * (dictIdx, srcIdx, 1). This method combines contiguous matches * of length MINMATCH or greater. Matches less than MINMATCH * are discarded */ static void ZSTD_eDist_combineMatches(ZSTD_eDist_state* state) @@ -460,7 +460,7 @@ static size_t ZSTD_eDist_convertMatchesToSequences(ZSTD_Sequence* sequences, } /*-************************************* -* Interal utils +* Internal utils ***************************************/ static size_t ZSTD_eDist_hamingDist(const BYTE* const a, diff --git a/contrib/match_finders/zstd_edist.h b/contrib/match_finders/zstd_edist.h index c775a498503..c739e2abc51 100644 --- a/contrib/match_finders/zstd_edist.h +++ b/contrib/match_finders/zstd_edist.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,7 +21,7 @@ * files would be small relative to the size of the file. * * Various 'diffing' algorithms utilize this notion of edit distance and - * the corrensponding concept of a minimal edit script between two + * the corresponding concept of a minimal edit script between two * sequences to identify the regions within two files where they differ. * The core algorithm used in this match finder is described in: * @@ -35,12 +35,12 @@ * * Note: after some experimentation, this approach proved to not provide enough * utility to justify the additional CPU used in finding matches. 
The one area - * where this approach consistenly outperforms Zstandard even on level 19 is - * when compressing small files (<10 KB) using a equally small dictionary that + * where this approach consistently outperforms Zstandard even on level 19 is + * when compressing small files (<10 KB) using an equally small dictionary that * is very similar to the source file. For the use case that this was intended, * (large similar files) this approach by itself took 5-10X longer than zstd-19 and * generally resulted in 2-3X larger files. The core advantage that zstd-19 has - * over this appraoch for match finding is the overlapping matches. This approach + * over this approach for match finding is the overlapping matches. This approach * cannot find any. * * I'm leaving this in the contrib section in case this ever becomes interesting diff --git a/contrib/pzstd/ErrorHolder.h b/contrib/pzstd/ErrorHolder.h index 829651c5961..2c2797edea0 100644 --- a/contrib/pzstd/ErrorHolder.h +++ b/contrib/pzstd/ErrorHolder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Logging.h b/contrib/pzstd/Logging.h index beb160b647d..84a08d20080 100644 --- a/contrib/pzstd/Logging.h +++ b/contrib/pzstd/Logging.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Makefile b/contrib/pzstd/Makefile index 3d930cca93f..830053cd70b 100644 --- a/contrib/pzstd/Makefile +++ b/contrib/pzstd/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Options.cpp b/contrib/pzstd/Options.cpp index 90f9d571fcf..a77824edb05 100644 --- a/contrib/pzstd/Options.cpp +++ b/contrib/pzstd/Options.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Options.h b/contrib/pzstd/Options.h index 924543abfd5..92c18a350cf 100644 --- a/contrib/pzstd/Options.h +++ b/contrib/pzstd/Options.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Pzstd.cpp b/contrib/pzstd/Pzstd.cpp index 2c09bda7a2d..67b94199149 100644 --- a/contrib/pzstd/Pzstd.cpp +++ b/contrib/pzstd/Pzstd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -10,11 +10,13 @@ #include "Pzstd.h" #include "SkippableFrame.h" #include "utils/FileSystem.h" +#include "utils/Portability.h" #include "utils/Range.h" #include "utils/ScopeGuard.h" #include "utils/ThreadPool.h" #include "utils/WorkQueue.h" +#include #include #include #include @@ -336,6 +338,10 @@ static size_t calculateStep( const ZSTD_parameters ¶ms) { (void)size; (void)numThreads; + // Not validated to work correctly for window logs > 23. + // It will definitely fail if windowLog + 2 is >= 4GB because + // the skippable frame can only store sizes up to 4GB. + assert(params.cParams.windowLog <= 23); return size_t{1} << (params.cParams.windowLog + 2); } @@ -587,7 +593,8 @@ std::uint64_t writeFile( // start writing before compression is done because we need to know the // compressed size. // Wait for the compressed size to be available and write skippable frame - SkippableFrame frame(out->size()); + assert(uint64_t(out->size()) < uint64_t(1) << 32); + SkippableFrame frame(uint32_t(out->size())); if (!writeData(frame.data(), outputFd)) { errorHolder.setError("Failed to write output"); return bytesWritten; diff --git a/contrib/pzstd/Pzstd.h b/contrib/pzstd/Pzstd.h index c667c887d7b..3645e594268 100644 --- a/contrib/pzstd/Pzstd.h +++ b/contrib/pzstd/Pzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/README.md b/contrib/pzstd/README.md index 84d94581583..bc8f831f82c 100644 --- a/contrib/pzstd/README.md +++ b/contrib/pzstd/README.md @@ -31,7 +31,7 @@ If this number is not suitable, during compilation you can define `PZSTD_NUM_THR ## Benchmarks -As a reference, PZstandard and Pigz were compared on an Intel Core i7 @ 3.1 GHz, each using 4 threads, with the [Silesia compression corpus](http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia). +As a reference, PZstandard and Pigz were compared on an Intel Core i7 @ 3.1 GHz, each using 4 threads, with the [Silesia compression corpus](https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia). Compression Speed vs Ratio with 4 Threads | Decompression Speed with 4 Threads ------------------------------------------|----------------------------------- diff --git a/contrib/pzstd/SkippableFrame.cpp b/contrib/pzstd/SkippableFrame.cpp index 769866dfc81..3bea4eb65bf 100644 --- a/contrib/pzstd/SkippableFrame.cpp +++ b/contrib/pzstd/SkippableFrame.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/SkippableFrame.h b/contrib/pzstd/SkippableFrame.h index 60deed0405b..817415e9231 100644 --- a/contrib/pzstd/SkippableFrame.h +++ b/contrib/pzstd/SkippableFrame.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/main.cpp b/contrib/pzstd/main.cpp index b93f043b16b..422b4a56a71 100644 --- a/contrib/pzstd/main.cpp +++ b/contrib/pzstd/main.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/OptionsTest.cpp b/contrib/pzstd/test/OptionsTest.cpp index e601148255d..91e39750d0c 100644 --- a/contrib/pzstd/test/OptionsTest.cpp +++ b/contrib/pzstd/test/OptionsTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/PzstdTest.cpp b/contrib/pzstd/test/PzstdTest.cpp index 5c7d6631080..75453f5f6f3 100644 --- a/contrib/pzstd/test/PzstdTest.cpp +++ b/contrib/pzstd/test/PzstdTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/RoundTrip.h b/contrib/pzstd/test/RoundTrip.h index c6364ecb422..f777622a393 100644 --- a/contrib/pzstd/test/RoundTrip.h +++ b/contrib/pzstd/test/RoundTrip.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/RoundTripTest.cpp b/contrib/pzstd/test/RoundTripTest.cpp index 36af0673ae6..c37646d1c44 100644 --- a/contrib/pzstd/test/RoundTripTest.cpp +++ b/contrib/pzstd/test/RoundTripTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/Buffer.h b/contrib/pzstd/utils/Buffer.h index d17ad2f2cf1..a85f770ba18 100644 --- a/contrib/pzstd/utils/Buffer.h +++ b/contrib/pzstd/utils/Buffer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/FileSystem.h b/contrib/pzstd/utils/FileSystem.h index 3cfbe86e507..8d57d05f0fa 100644 --- a/contrib/pzstd/utils/FileSystem.h +++ b/contrib/pzstd/utils/FileSystem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,16 +8,18 @@ */ #pragma once +#include "utils/Portability.h" #include "utils/Range.h" #include #include #include +#include #include // A small subset of `std::filesystem`. // `std::filesystem` should be a drop in replacement. -// See http://en.cppreference.com/w/cpp/filesystem for documentation. +// See https://en.cppreference.com/w/cpp/filesystem for documentation. 
namespace pzstd { @@ -28,7 +30,7 @@ typedef struct ::_stat64 file_status; typedef struct ::stat file_status; #endif -/// http://en.cppreference.com/w/cpp/filesystem/status +/// https://en.cppreference.com/w/cpp/filesystem/status inline file_status status(StringPiece path, std::error_code& ec) noexcept { file_status status; #if defined(_MSC_VER) @@ -44,7 +46,7 @@ inline file_status status(StringPiece path, std::error_code& ec) noexcept { return status; } -/// http://en.cppreference.com/w/cpp/filesystem/is_regular_file +/// https://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(file_status status) noexcept { #if defined(S_ISREG) return S_ISREG(status.st_mode); @@ -55,12 +57,12 @@ inline bool is_regular_file(file_status status) noexcept { #endif } -/// http://en.cppreference.com/w/cpp/filesystem/is_regular_file +/// https://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(StringPiece path, std::error_code& ec) noexcept { return is_regular_file(status(path, ec)); } -/// http://en.cppreference.com/w/cpp/filesystem/is_directory +/// https://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(file_status status) noexcept { #if defined(S_ISDIR) return S_ISDIR(status.st_mode); @@ -71,22 +73,22 @@ inline bool is_directory(file_status status) noexcept { #endif } -/// http://en.cppreference.com/w/cpp/filesystem/is_directory +/// https://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(StringPiece path, std::error_code& ec) noexcept { return is_directory(status(path, ec)); } -/// http://en.cppreference.com/w/cpp/filesystem/file_size +/// https://en.cppreference.com/w/cpp/filesystem/file_size inline std::uintmax_t file_size( StringPiece path, std::error_code& ec) noexcept { auto stat = status(path, ec); if (ec) { - return -1; + return std::numeric_limits::max(); } if (!is_regular_file(stat)) { ec.assign(ENOTSUP, std::generic_category()); - return -1; + return std::numeric_limits::max(); } ec.clear(); return stat.st_size; diff --git a/contrib/pzstd/utils/Likely.h b/contrib/pzstd/utils/Likely.h index 7cea8da2771..52243a64eab 100644 --- a/contrib/pzstd/utils/Likely.h +++ b/contrib/pzstd/utils/Likely.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/Portability.h b/contrib/pzstd/utils/Portability.h new file mode 100644 index 00000000000..ef1f86e51f6 --- /dev/null +++ b/contrib/pzstd/utils/Portability.h @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#pragma once + +#include + +// Required for windows, which defines min/max, but we want the std:: version. +#undef min +#undef max diff --git a/contrib/pzstd/utils/Range.h b/contrib/pzstd/utils/Range.h index 6a850ad4eaa..0fd8f9f8655 100644 --- a/contrib/pzstd/utils/Range.h +++ b/contrib/pzstd/utils/Range.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,9 @@ #pragma once #include "utils/Likely.h" +#include "utils/Portability.h" +#include #include #include #include diff --git a/contrib/pzstd/utils/ResourcePool.h b/contrib/pzstd/utils/ResourcePool.h index 8dfcdd76590..7c4bb623580 100644 --- a/contrib/pzstd/utils/ResourcePool.h +++ b/contrib/pzstd/utils/ResourcePool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/ScopeGuard.h b/contrib/pzstd/utils/ScopeGuard.h index 31768f43d22..911fd984214 100644 --- a/contrib/pzstd/utils/ScopeGuard.h +++ b/contrib/pzstd/utils/ScopeGuard.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -15,7 +15,7 @@ namespace pzstd { /** * Dismissable scope guard. * `Function` must be callable and take no parameters. - * Unless `dissmiss()` is called, the callable is executed upon destruction of + * Unless `dismiss()` is called, the callable is executed upon destruction of * `ScopeGuard`. * * Example: diff --git a/contrib/pzstd/utils/ThreadPool.h b/contrib/pzstd/utils/ThreadPool.h index 8ece8e0da4e..a087d7c1cff 100644 --- a/contrib/pzstd/utils/ThreadPool.h +++ b/contrib/pzstd/utils/ThreadPool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/WorkQueue.h b/contrib/pzstd/utils/WorkQueue.h index 1d14d922c64..d7947b814dd 100644 --- a/contrib/pzstd/utils/WorkQueue.h +++ b/contrib/pzstd/utils/WorkQueue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/BufferTest.cpp b/contrib/pzstd/utils/test/BufferTest.cpp index fbba74e8262..58bf08dcd69 100644 --- a/contrib/pzstd/utils/test/BufferTest.cpp +++ b/contrib/pzstd/utils/test/BufferTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/RangeTest.cpp b/contrib/pzstd/utils/test/RangeTest.cpp index 755b50fa6e8..8b7dee27193 100644 --- a/contrib/pzstd/utils/test/RangeTest.cpp +++ b/contrib/pzstd/utils/test/RangeTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ResourcePoolTest.cpp b/contrib/pzstd/utils/test/ResourcePoolTest.cpp index 6fe145180be..750ee084b07 100644 --- a/contrib/pzstd/utils/test/ResourcePoolTest.cpp +++ b/contrib/pzstd/utils/test/ResourcePoolTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ScopeGuardTest.cpp b/contrib/pzstd/utils/test/ScopeGuardTest.cpp index 7bc624da79b..0f77cdf38b7 100644 --- a/contrib/pzstd/utils/test/ScopeGuardTest.cpp +++ b/contrib/pzstd/utils/test/ScopeGuardTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ThreadPoolTest.cpp b/contrib/pzstd/utils/test/ThreadPoolTest.cpp index 703fd4c9ca1..a01052e605f 100644 --- a/contrib/pzstd/utils/test/ThreadPoolTest.cpp +++ b/contrib/pzstd/utils/test/ThreadPoolTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/WorkQueueTest.cpp b/contrib/pzstd/utils/test/WorkQueueTest.cpp index 14cf77304f2..16600bb6031 100644 --- a/contrib/pzstd/utils/test/WorkQueueTest.cpp +++ b/contrib/pzstd/utils/test/WorkQueueTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/recovery/Makefile b/contrib/recovery/Makefile index 9a9f4f2e81d..be6ea4b0e9c 100644 --- a/contrib/recovery/Makefile +++ b/contrib/recovery/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2019-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/recovery/recover_directory.c b/contrib/recovery/recover_directory.c index 13f83fd106b..b9bd7ab4996 100644 --- a/contrib/recovery/recover_directory.c +++ b/contrib/recovery/recover_directory.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/examples/Makefile b/contrib/seekable_format/examples/Makefile index 9df6b75fb84..fcc04587ce8 100644 --- a/contrib/seekable_format/examples/Makefile +++ b/contrib/seekable_format/examples/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2017-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/examples/parallel_compression.c b/contrib/seekable_format/examples/parallel_compression.c index 4118b0ad762..0ec9fbd20f7 100644 --- a/contrib/seekable_format/examples/parallel_compression.c +++ b/contrib/seekable_format/examples/parallel_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/examples/parallel_processing.c b/contrib/seekable_format/examples/parallel_processing.c index 36226b49fd3..b1709db770e 100644 --- a/contrib/seekable_format/examples/parallel_processing.c +++ b/contrib/seekable_format/examples/parallel_processing.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/examples/seekable_compression.c b/contrib/seekable_format/examples/seekable_compression.c index 9a331a89531..182b46f6458 100644 --- a/contrib/seekable_format/examples/seekable_compression.c +++ b/contrib/seekable_format/examples/seekable_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/examples/seekable_decompression.c b/contrib/seekable_format/examples/seekable_decompression.c index e9e2013331e..2c4f3ba0cd5 100644 --- a/contrib/seekable_format/examples/seekable_decompression.c +++ b/contrib/seekable_format/examples/seekable_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/examples/seekable_decompression_mem.c b/contrib/seekable_format/examples/seekable_decompression_mem.c index e7b1c65059c..44a06fbbfb0 100644 --- a/contrib/seekable_format/examples/seekable_decompression_mem.c +++ b/contrib/seekable_format/examples/seekable_decompression_mem.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/tests/Makefile b/contrib/seekable_format/tests/Makefile index d51deb3ea82..a81f2229fa5 100644 --- a/contrib/seekable_format/tests/Makefile +++ b/contrib/seekable_format/tests/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2017-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/tests/seekable_tests.c b/contrib/seekable_format/tests/seekable_tests.c index a482638b918..1bb2d0e8130 100644 --- a/contrib/seekable_format/tests/seekable_tests.c +++ b/contrib/seekable_format/tests/seekable_tests.c @@ -186,6 +186,40 @@ int main(int argc, const char** argv) } printf("Success!\n"); + + printf("Test %u - check ZSTD magic in compressing empty string: ", testNb++); + { // compressing empty string should return a zstd header + size_t const capacity = 255; + char* inBuffer = malloc(capacity); + assert(inBuffer != NULL); + inBuffer[0] = '\0'; + void* const outBuffer = malloc(capacity); + assert(outBuffer != NULL); + + ZSTD_seekable_CStream *s = ZSTD_seekable_createCStream(); + ZSTD_seekable_initCStream(s, 1, 1, 255); + + ZSTD_inBuffer input = { .src=inBuffer, .pos=0, .size=0 }; + ZSTD_outBuffer output = { .dst=outBuffer, .pos=0, .size=capacity }; + + ZSTD_seekable_compressStream(s, &output, &input); + ZSTD_seekable_endStream(s, &output); + + if((((char*)output.dst)[0] != '\x28') | (((char*)output.dst)[1] != '\xb5') | (((char*)output.dst)[2] != '\x2f') | (((char*)output.dst)[3] != '\xfd')) { + printf("%#02x %#02x %#02x %#02x\n", ((char*)output.dst)[0], ((char*)output.dst)[1] , ((char*)output.dst)[2] , ((char*)output.dst)[3] ); + + free(inBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(s); + goto _test_error; + } + + free(inBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(s); + } + printf("Success!\n"); + /* TODO: Add more tests */ printf("Finished tests\n"); return 0; diff --git a/contrib/seekable_format/zstd_seekable_compression_format.md b/contrib/seekable_format/zstd_seekable_compression_format.md index 55aebfd2e9d..7bd0790e81b 100644 --- a/contrib/seekable_format/zstd_seekable_compression_format.md +++ b/contrib/seekable_format/zstd_seekable_compression_format.md @@ -2,7 +2,7 @@ ### Notices -Copyright (c) 2017-present Facebook, Inc. +Copyright (c) Meta Platforms, Inc. and affiliates. Permission is granted to copy and distribute this document for any purpose and without charge, diff --git a/contrib/seekable_format/zstdseek_compress.c b/contrib/seekable_format/zstdseek_compress.c index 242bd2ac3a1..113f6f99ce2 100644 --- a/contrib/seekable_format/zstdseek_compress.c +++ b/contrib/seekable_format/zstdseek_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -350,7 +350,7 @@ size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output) size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) { - if (!zcs->writingSeekTable && zcs->frameDSize) { + if (!zcs->writingSeekTable) { const size_t endFrame = ZSTD_seekable_endFrame(zcs, output); if (ZSTD_isError(endFrame)) return endFrame; /* return an accurate size hint */ diff --git a/contrib/seekable_format/zstdseek_decompress.c b/contrib/seekable_format/zstdseek_decompress.c index 5eed024950b..fbb2d4fe3be 100644 --- a/contrib/seekable_format/zstdseek_decompress.c +++ b/contrib/seekable_format/zstdseek_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -23,13 +23,64 @@ # endif #endif +/* ************************************************************ +* Detect POSIX version +* PLATFORM_POSIX_VERSION = 0 for non-Unix e.g. Windows +* PLATFORM_POSIX_VERSION = 1 for Unix-like but non-POSIX +* PLATFORM_POSIX_VERSION > 1 is equal to found _POSIX_VERSION +* Value of PLATFORM_POSIX_VERSION can be forced on command line +***************************************************************/ +#ifndef PLATFORM_POSIX_VERSION + +# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ \ + || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) /* BSD distros */ + /* exception rule : force posix version to 200112L, + * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */ +# define PLATFORM_POSIX_VERSION 200112L + +/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). + * note : there is no simple way to know in advance if is present or not on target system, + * Posix specification mandates its presence and its content, but target system must respect this spec. + * It's necessary to _not_ #include whenever target OS is not unix-like + * otherwise it will block preprocessing stage. + * The following list of build macros tries to "guess" if target OS is likely unix-like, and therefore can #include + */ +# elif !defined(_WIN32) \ + && ( defined(__unix__) || defined(__unix) \ + || defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) ) + +# if defined(__linux__) || defined(__linux) || defined(__CYGWIN__) +# ifndef _POSIX_C_SOURCE +# define _POSIX_C_SOURCE 200809L /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */ +# endif +# endif +# include /* declares _POSIX_VERSION */ +# if defined(_POSIX_VERSION) /* POSIX compliant */ +# define PLATFORM_POSIX_VERSION _POSIX_VERSION +# else +# define PLATFORM_POSIX_VERSION 1 +# endif + +# ifdef __UCLIBC__ +# ifndef __USE_MISC +# define __USE_MISC /* enable st_mtim on uclibc */ +# endif +# endif + +# else /* non-unix target platform (like Windows) */ +# define PLATFORM_POSIX_VERSION 0 +# endif + +#endif /* PLATFORM_POSIX_VERSION */ + + /* ************************************************************ * Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW ***************************************************************/ #if defined(_MSC_VER) && _MSC_VER >= 1400 # define LONG_SEEK _fseeki64 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ -# define LONG_SEEK fseeko +# define LONG_SEEK fseeko #elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) # define LONG_SEEK fseeko64 #elif defined(_WIN32) && !defined(__DJGPP__) @@ -466,9 +517,9 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign size_t forwardProgress; if (zs->decompressedOffset < offset) { /* dummy decompressions until we get to the target offset */ - outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0}; + outTmp = (ZSTD_outBuffer){zs->outBuff, (size_t) (MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset)), 0}; } else { - outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset}; + outTmp = 
(ZSTD_outBuffer){dst, len, (size_t) (zs->decompressedOffset - offset)}; } prevOutPos = outTmp.pos; diff --git a/contrib/seqBench/Makefile b/contrib/seqBench/Makefile new file mode 100644 index 00000000000..e7f08a42cc8 --- /dev/null +++ b/contrib/seqBench/Makefile @@ -0,0 +1,58 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# ################################################################ + +PROGDIR = ../../programs +LIBDIR = ../../lib + +LIBZSTD = $(LIBDIR)/libzstd.a + +CPPFLAGS+= -I$(LIBDIR) -I$(LIBDIR)/common -I$(LIBDIR)/dictBuilder -I$(PROGDIR) + +CFLAGS ?= -O3 -g +CFLAGS += -std=gnu99 +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) + + +default: seqBench + +all : seqBench + +seqBench: util.o timefn.o benchfn.o datagen.o xxhash.o seqBench.c $(LIBZSTD) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +.PHONY: $(LIBZSTD) +$(LIBZSTD): + $(MAKE) -C $(LIBDIR) libzstd.a CFLAGS="$(CFLAGS)" + +benchfn.o: $(PROGDIR)/benchfn.c + $(CC) $(CPPFLAGS) $(CFLAGS) $^ -c + +timefn.o: $(PROGDIR)/timefn.c + $(CC) $(CPPFLAGS) $(CFLAGS) $^ -c + +datagen.o: $(PROGDIR)/datagen.c + $(CC) $(CPPFLAGS) $(CFLAGS) $^ -c + +util.o: $(PROGDIR)/util.c + $(CC) $(CPPFLAGS) $(CFLAGS) $^ -c + + +xxhash.o : $(LIBDIR)/common/xxhash.c + $(CC) $(CPPFLAGS) $(CFLAGS) $^ -c + + +clean: + $(RM) *.o + $(MAKE) -C $(LIBDIR) clean > /dev/null + $(RM) seqBench diff --git a/contrib/seqBench/seqBench.c b/contrib/seqBench/seqBench.c new file mode 100644 index 00000000000..7efebec7bea --- /dev/null +++ b/contrib/seqBench/seqBench.c @@ -0,0 +1,53 @@ +#define ZSTD_STATIC_LINKING_ONLY +#include +#include +#include +#include +#include + +int main(int argc, char *argv[]) { + ZSTD_CCtx* zc = ZSTD_createCCtx(); + + if (argc != 2) { + printf("Usage: seqBench \n"); // TODO provide the block delim option here + return 1; + } + + FILE *f = fopen(argv[1], "rb"); + fseek(f, 0, SEEK_END); + long inBufSize = ftell(f); + fseek(f, 0, SEEK_SET); + + char *inBuf = malloc(inBufSize + 1); + fread(inBuf, inBufSize, 1, f); + fclose(f); + + size_t seqsSize = ZSTD_sequenceBound(inBufSize); + ZSTD_Sequence *seqs = (ZSTD_Sequence*)malloc(seqsSize * sizeof(ZSTD_Sequence)); + char *outBuf = malloc(ZSTD_compressBound(inBufSize)); + + ZSTD_generateSequences(zc, seqs, seqsSize, inBuf, inBufSize); + ZSTD_CCtx_setParameter(zc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters); + size_t outBufSize = ZSTD_compressSequences(zc, outBuf, inBufSize, seqs, seqsSize, inBuf, inBufSize); + if (ZSTD_isError(outBufSize)) { + printf("ERROR: %lu\n", outBufSize); + return 1; + } + + char *validationBuf = malloc(inBufSize); + ZSTD_decompress(validationBuf, inBufSize, outBuf, outBufSize); + + if (memcmp(inBuf, validationBuf, inBufSize) == 0) { + printf("Compression and decompression were successful!\n"); + } else { + printf("ERROR: input and validation buffers don't match!\n"); + for (int i = 0; i < inBufSize; i++) { + if (inBuf[i] != validationBuf[i]) { + printf("First bad index: %d\n", i); + break; + } + } + } + + return 0; +} diff 
--git a/doc/README.md b/doc/README.md index bb7a3e4902e..8f3babcdbb2 100644 --- a/doc/README.md +++ b/doc/README.md @@ -5,8 +5,9 @@ This directory contains material defining the Zstandard format, as well as detailed instructions to use `zstd` library. __`zstd_manual.html`__ : Documentation of `zstd.h` API, in html format. -Click on this link: [http://zstd.net/zstd_manual.html](http://zstd.net/zstd_manual.html) -to display documentation of latest release in readable format within a browser. +Unfortunately, GitHub doesn't display `html` files in parsed format, just as source code. +For a readable display of html API documentation of latest release, +use this link: [https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html](https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html) . __`zstd_compression_format.md`__ : This document defines the Zstandard compression format. Compliant decoders must adhere to this document, diff --git a/doc/decompressor_errata.md b/doc/decompressor_errata.md new file mode 100644 index 00000000000..b162e7fd6e7 --- /dev/null +++ b/doc/decompressor_errata.md @@ -0,0 +1,84 @@ +Decompressor Errata +=================== + +This document captures known decompressor bugs, where the decompressor rejects a valid zstd frame. +Each entry will contain: +1. The last affected decompressor versions. +2. The decompressor components affected. +3. Whether the compressed frame could ever be produced by the reference compressor. +4. An example frame. +5. A description of the bug. + +The document is in reverse chronological order, with the bugs that affect the most recent zstd decompressor versions listed first. + + +Compressed block with 0 literals and 0 sequences +------------------------------------------------ + +**Last affected version**: v1.5.2 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd 2000 1500 0000 00` + +The zstd decoder incorrectly rejected blocks of type `Compressed_Block` that encode literals as `Raw_Literals_Block` with no literals and have no sequences. + +This type of block was never generated by the reference compressor. + +Additionally, these blocks were disallowed by the spec up until spec version 0.3.2 when the restriction was lifted by [PR#1689](https://github.com/facebook/zstd/pull/1689). + +> A Compressed_Block has the extra restriction that Block_Size is always strictly less than the decompressed size. If this condition cannot be respected, the block must be sent uncompressed instead (Raw_Block). + +First block is RLE block +------------------------ + +**Last affected version**: v1.4.3 + +**Affected decompressor component(s)**: CLI only + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd a001 0002 0002 0010 000b 0000 00` + +The zstd CLI decompressor rejected cases where the first block was an RLE block whose `Block_Size` is 131072, and the frame contains more than one block. +This only affected the zstd CLI, and not the library. + +The example is an RLE block with 131072 bytes, followed by a second RLE block with 1 byte. + +The compressor currently works around this limitation by explicitly avoiding producing RLE blocks as the first +block.
+ +https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L3527-L3535 + +Tiny FSE Table & Block +---------------------- + +**Last affected version**: v1.3.4 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: Possibly until version v1.3.4, but probably never + +**Example Frame**: `28b5 2ffd 2027 c500 0080 f3f1 f0ec ebc6 c5c7 f09d 4300 0000 e0e0 0658 0100 603e 52` + +The zstd library rejected blocks of type `Compressed_Block` whose offset of the last table with type `FSE_Compressed_Mode` was less than 4 bytes from the end of the block. + +In more depth, let `Last_Table_Offset` be the offset in the compressed block (excluding the header) that +the last table with type `FSE_Compressed_Mode` started. If `Block_Content - Last_Table_Offset < 4` then +the buggy zstd decompressor would reject the block. This occurs when the last serialized table is 2 bytes +and the bitstream size is 1 byte. + +For example: +* There is 1 sequence in the block +* `Literals_Lengths_Mode` is `FSE_Compressed_Mode` & the serialized table size is 2 bytes +* `Offsets_Mode` is `Predefined_Mode` +* `Match_Lengths_Mode` is `Predefined_Mode` +* The bitstream is 1 byte. E.g. there is only one sequence and it fits in 1 byte. + +The total `Block_Content` is `5` bytes, and `Last_Table_Offset` is `2`. + +See the compressor workaround code: + +https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L2667-L2682 diff --git a/doc/educational_decoder/Makefile b/doc/educational_decoder/Makefile index a9c601ebca7..f6deeb13d2f 100644 --- a/doc/educational_decoder/Makefile +++ b/doc/educational_decoder/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/doc/educational_decoder/harness.c b/doc/educational_decoder/harness.c index 935f60da87e..12c5a801beb 100644 --- a/doc/educational_decoder/harness.c +++ b/doc/educational_decoder/harness.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c index 93640708663..9ade7650268 100644 --- a/doc/educational_decoder/zstd_decompress.c +++ b/doc/educational_decoder/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -1890,7 +1890,7 @@ static size_t HUF_decompress_4stream(const HUF_dtable *const dtable, /// Initializes a Huffman table using canonical Huffman codes /// For more explanation on canonical Huffman codes see -/// http://www.cs.uofs.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html +/// https://www.cs.scranton.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html /// Codes within a level are allocated in symbol order (i.e. 
smaller symbols get /// earlier codes) static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits, diff --git a/doc/educational_decoder/zstd_decompress.h b/doc/educational_decoder/zstd_decompress.h index d89c8352324..c13c8134dee 100644 --- a/doc/educational_decoder/zstd_decompress.h +++ b/doc/educational_decoder/zstd_decompress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/doc/images/zstd_logo86.png b/doc/images/zstd_logo86.png index 216f228061c..8abefe21b1e 100644 Binary files a/doc/images/zstd_logo86.png and b/doc/images/zstd_logo86.png differ diff --git a/doc/zstd_compression_format.md b/doc/zstd_compression_format.md index fc09bd5538c..e40677a1dc8 100644 --- a/doc/zstd_compression_format.md +++ b/doc/zstd_compression_format.md @@ -3,7 +3,7 @@ Zstandard Compression Format ### Notices -Copyright (c) 2016-2021 Yann Collet, Facebook, Inc. +Copyright (c) Meta Platforms, Inc. and affiliates. Permission is granted to copy and distribute this document for any purpose and without charge, @@ -26,7 +26,7 @@ The purpose of this document is to define a lossless compressed data format, that is independent of CPU type, operating system, file system and character set, suitable for file compression, pipe and streaming compression, -using the [Zstandard algorithm](http://www.zstandard.org). +using the [Zstandard algorithm](https://facebook.github.io/zstd/). The text of the specification assumes a basic background in programming at the level of bits and other primitive data representations. @@ -35,7 +35,7 @@ even for an arbitrarily long sequentially presented input data stream, using only an a priori bounded amount of intermediate storage, and hence can be used in data communications. The format uses the Zstandard compression method, -and optional [xxHash-64 checksum method](http://www.xxhash.org), +and optional [xxHash-64 checksum method](https://cyan4973.github.io/xxHash/), for detection of data corruption. The data format defined by this specification @@ -134,7 +134,7 @@ __`Content_Checksum`__ An optional 32-bit checksum, only present if `Content_Checksum_flag` is set. The content checksum is the result -of [xxh64() hash function](http://www.xxhash.org) +of [xxh64() hash function](https://cyan4973.github.io/xxHash/) digesting the original (decoded) data as input, and a seed of zero. The low 4 bytes of the checksum are stored in __little-endian__ format. @@ -435,7 +435,7 @@ They can be decoded first, and then copied during [Sequence Execution], or they can be decoded on the flow during [Sequence Execution]. Literals can be stored uncompressed or compressed using Huffman prefix codes. -When compressed, an optional tree description can be present, +When compressed, a tree description may optionally be present, followed by 1 or 4 streams. | `Literals_Section_Header` | [`Huffman_Tree_Description`] | [jumpTable] | Stream1 | [Stream2] | [Stream3] | [Stream4] | @@ -510,7 +510,7 @@ Its value is : `Size_Format = (Literals_Section_Header[0]>>2) & 3` `Regenerated_Size = (Literals_Section_Header[0]>>4) + (Literals_Section_Header[1]<<4) + (Literals_Section_Header[2]<<12)` Only Stream1 is present for these cases. -Note : it's allowed to represent a short value (for example `13`) +Note : it's allowed to represent a short value (for example `27`) using a long format, even if it's less efficient. 
__`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ : @@ -521,19 +521,33 @@ __`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 01 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). + Both `Regenerated_Size` and `Compressed_Size` use 10 bits (6-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 10 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 14 bits (0-16383). + Both `Regenerated_Size` and `Compressed_Size` use 14 bits (6-16383). `Literals_Section_Header` uses 4 bytes. - `Size_Format` == 11 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 18 bits (0-262143). + Both `Regenerated_Size` and `Compressed_Size` use 18 bits (6-262143). `Literals_Section_Header` uses 5 bytes. Both `Compressed_Size` and `Regenerated_Size` fields follow __little-endian__ convention. Note: `Compressed_Size` __includes__ the size of the Huffman Tree description _when_ it is present. +4 streams is superior to 1 stream in decompression speed, +by exploiting instruction level parallelism. +But it's also more expensive, +costing on average ~7.3 bytes more than the 1 stream mode, mostly from the jump table. + +In general, use the 4 streams mode when there are more literals to decode, +to favor higher decompression speeds. +Beyond 1KB, the 4 streams mode is compulsory anyway. + +Note that a minimum of 6 bytes is required for the 4 streams mode. +That's a technical minimum, but it's not recommended to employ the 4 streams mode +for such a small quantity, that would be wasteful. +A more practical lower bound would be around ~256 bytes. + #### Raw Literals Block The data in Stream1 is `Regenerated_Size` bytes long, it contains the raw literals data to be used during [Sequence Execution]. @@ -945,14 +959,14 @@ sequences are applied to them: |:--------------:|:-----------------:|:------------------:|:------------------:|:------------------:|:-----------------------:| | | | 1 | 4 | 8 | starting values | | 1114 | 11 | 1111 | 1 | 4 | non-repeat | -| 1 | 22 | 1111 | 1 | 4 | repeat 1; no change | +| 1 | 22 | 1111 | 1 | 4 | repeat 1: no change | | 2225 | 22 | 2222 | 1111 | 1 | non-repeat | | 1114 | 111 | 1111 | 2222 | 1111 | non-repeat | | 3336 | 33 | 3333 | 1111 | 2222 | non-repeat | -| 2 | 22 | 1111 | 3333 | 2222 | repeat 2; swap 1 & 2 | -| 3 | 33 | 2222 | 1111 | 3333 | repeat 3; rotate 3 to 1 | -| 3 | 0 | 2221 | 2222 | 1111 | insert resolved offset | -| 1 | 0 | 2222 | 2221 | 3333 | repeat 2 | +| 2 | 22 | 1111 | 3333 | 2222 | repeat 2: swap 1 & 2 | +| 3 | 33 | 2222 | 1111 | 3333 | repeat 3: rotate 3 to 1 | +| 3 | 0 | 2221 | 2222 | 1111 | special case : insert `repeat1 - 1` | +| 1 | 0 | 2222 | 2221 | 1111 | == repeat 2 | Skippable Frames @@ -967,14 +981,14 @@ into a flow of concatenated frames. Skippable frames defined in this specification are compatible with [LZ4] ones. -[LZ4]:http://www.lz4.org +[LZ4]:https://lz4.github.io/lz4/ From a compliant decoder perspective, skippable frames need just be skipped, and their content ignored, resuming decoding after the skippable frame. It can be noted that a skippable frame can be used to watermark a stream of concatenated frames -embedding any kind of tracking information (even just an UUID). +embedding any kind of tracking information (even just a UUID). 
Users wary of such possibility should scan the stream of concatenated frames in an attempt to detect such frame for analysis or removal. diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html index 9f73c4c815e..c33f0e26287 100644 --- a/doc/zstd_manual.html +++ b/doc/zstd_manual.html @@ -1,10 +1,10 @@ -zstd 1.5.2 Manual +zstd 1.5.4 Manual -

zstd 1.5.2 Manual

+

zstd 1.5.4 Manual


Contents

    @@ -121,7 +121,9 @@

    zstd 1.5.2 Manual

    note 6 : This function replaces ZSTD_getDecompressedSize()


    -
    unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
    +
    ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
    +ZSTDLIB_API
    +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
     

    NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). Both functions work the same way, but ZSTD_getDecompressedSize() blends "empty", "unknown" and "error" results to the same return value (0), @@ -137,8 +139,30 @@

    zstd 1.5.2 Manual

    or an error code if input is invalid
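As a hedged illustration of the distinction drawn above (the helper name allocForFrame and its buffer parameters are hypothetical), a caller can branch on the three distinct outcomes of ZSTD_getFrameContentSize() instead of the single blended 0 returned by the obsolete function:

    #include <stdlib.h>
    #include <zstd.h>

    /* Sketch: size a destination buffer from a frame header.
     * Assumes src/srcSize contain at least the frame header bytes. */
    static void* allocForFrame(const void* src, size_t srcSize, size_t* dstSize)
    {
        unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
        if (rSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a valid zstd frame */
        if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* size not recorded: use streaming decompression instead */
        *dstSize = (size_t)rSize;                            /* regenerated size is known */
        return malloc(*dstSize);
    }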


    -

    Helper functions

    #define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
    -size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
    +

    Helper functions

    /* ZSTD_compressBound() :
    + * maximum compressed size in worst case single-pass scenario.
    + * When invoking `ZSTD_compress()` or any other one-pass compression function,
    + * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
    + * as it eliminates one potential failure scenario,
    + * aka not enough room in dst buffer to write the compressed frame.
    + * Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE .
    + *        In which case, ZSTD_compressBound() will return an error code
    + *        which can be tested using ZSTD_isError().
    + *
    + * ZSTD_COMPRESSBOUND() :
    + * same as ZSTD_compressBound(), but as a macro.
    + * It can be used to produce constants, which can be useful for static allocation,
+ * for example to size a static array on the stack.
+ * Will produce constant value 0 if srcSize is too large.
    + */
    +#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U)
    +#define ZSTD_COMPRESSBOUND(srcSize)   (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
    +size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
    +/* ZSTD_isError() :
    + * Most ZSTD_* functions returning a size_t value can be tested for error,
    + * using ZSTD_isError().
    + * @return 1 if error, 0 otherwise
    + */
     unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
     const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
     int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed, requires v1.4.0+ */
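A minimal sketch of the usage pattern these helpers are meant for, assuming an in-memory src buffer (the compressToHeap helper name is hypothetical); it pairs ZSTD_compressBound() with ZSTD_isError()/ZSTD_getErrorName() as documented above:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static size_t compressToHeap(const void* src, size_t srcSize, void** dstOut)
    {
        size_t const bound = ZSTD_compressBound(srcSize);  /* worst-case single-pass size */
        if (ZSTD_isError(bound)) return bound;             /* srcSize exceeded ZSTD_MAX_INPUT_SIZE */
        void* const dst = malloc(bound);
        if (dst == NULL) return bound;                     /* allocation failure: bail out */
        size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3 /* level */);
        if (ZSTD_isError(cSize)) {
            fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize));
            free(dst);
            return cSize;
        }
        *dstOut = dst;
        return cSize;
    }

For constant sizes, the macro form can size a static array instead, e.g. `char dst[ZSTD_COMPRESSBOUND(4096)];`.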
    @@ -359,6 +383,9 @@ 

    Decompression context

      When decompressing many times,
          * ZSTD_c_validateSequences
          * ZSTD_c_useBlockSplitter
          * ZSTD_c_useRowMatchFinder
    +     * ZSTD_c_prefetchCDictTables
    +     * ZSTD_c_enableSeqProducerFallback
    +     * ZSTD_c_maxBlockSize
          * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
          * note : never ever use experimentalParam? names directly;
          *        also, the enums values themselves are unstable and can still change.
    @@ -377,7 +404,11 @@ 

    Decompression context

      When decompressing many times,
          ZSTD_c_experimentalParam12=1009,
          ZSTD_c_experimentalParam13=1010,
          ZSTD_c_experimentalParam14=1011,
    -     ZSTD_c_experimentalParam15=1012
    +     ZSTD_c_experimentalParam15=1012,
    +     ZSTD_c_experimentalParam16=1013,
    +     ZSTD_c_experimentalParam17=1014,
    +     ZSTD_c_experimentalParam18=1015,
    +     ZSTD_c_experimentalParam19=1016
     } ZSTD_cParameter;
     

    typedef struct {
    @@ -440,7 +471,7 @@ 

    Decompression context

      When decompressing many times,
                       They will be used to compress next frame.
                       Resetting session never fails.
       - The parameters : changes all parameters back to "default".
    -                  This removes any reference to any dictionary too.
    +                  This also removes any reference to any dictionary or external sequence producer.
                       Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
                       otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
       - Both : similar to resetting the session, followed by resetting parameters.
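To make the three reset directives concrete, here is a small sketch (the wrapper function is hypothetical; context creation and the surrounding compression calls are reduced to comments) of when each one might be used:

    #include <zstd.h>

    static void resetExamples(ZSTD_CCtx* cctx)
    {
        /* Between frames: keep parameters and any loaded dictionary,
         * just abandon the current frame. */
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);

        /* Back to defaults: also drops any dictionary reference
         * (and, per the note above, any external sequence producer).
         * Only legal while no compression is ongoing. */
        ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);

        /* Both at once, equivalent to the two calls above. */
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
    }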
    @@ -479,13 +510,15 @@ 

    Decompression context

      When decompressing many times,
          * ZSTD_d_stableOutBuffer
          * ZSTD_d_forceIgnoreChecksum
          * ZSTD_d_refMultipleDDicts
    +     * ZSTD_d_disableHuffmanAssembly
          * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
          * note : never ever use experimentalParam? names directly
          */
          ZSTD_d_experimentalParam1=1000,
          ZSTD_d_experimentalParam2=1001,
          ZSTD_d_experimentalParam3=1002,
    -     ZSTD_d_experimentalParam4=1003
    +     ZSTD_d_experimentalParam4=1003,
    +     ZSTD_d_experimentalParam5=1004
     
     } ZSTD_dParameter;
     

    @@ -654,6 +687,9 @@

    Streaming compression functions

    typedef enum {
          ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
          ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
          ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
    +
    + Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API
    + to compress with a dictionary.
      
     


    @@ -689,6 +725,33 @@

    ZSTD_DStream management functions

    ZSTD_DStream* ZSTD_
     size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
     

    Streaming decompression functions


    +
    size_t ZSTD_initDStream(ZSTD_DStream* zds);
    +

    Initialize/reset DStream state for new decompression operation. + Call before new decompression operation using same DStream. + + Note : This function is redundant with the advanced API and equivalent to: + ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); + ZSTD_DCtx_refDDict(zds, NULL); + +


    + +
    size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
    +

Streaming decompression function. + Call repetitively to consume full input, updating it as necessary. + Function will update both input and output `pos` fields exposing current state via these fields: + - `input.pos < input.size`, some input remaining and caller should provide remaining input + on the next call. + - `output.pos < output.size`, decoder finished and flushed all remaining buffers. + - `output.pos == output.size`, potentially unflushed data present in the internal buffers, + call ZSTD_decompressStream() again to flush remaining data to output. + Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + + @return : 0 when a frame is completely decoded and fully flushed, + or an error code, which can be tested using ZSTD_isError(), + or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + +


    +
    size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
     

    size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
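A condensed sketch of the loop these two size hints are meant to feed (the decompressFile helper is hypothetical, stdio-based, with allocation and error checks trimmed to comments); note that sizing the output buffer with ZSTD_DStreamOutSize() is what lets the simple inner loop rely on at least one full block being flushed per call:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int decompressFile(FILE* fin, FILE* fout)
    {
        size_t const inCap  = ZSTD_DStreamInSize();   /* recommended input chunk size */
        size_t const outCap = ZSTD_DStreamOutSize();  /* guarantees flushing one full block */
        void* const inBuf  = malloc(inCap);
        void* const outBuf = malloc(outCap);          /* allocation checks omitted */
        ZSTD_DStream* const zds = ZSTD_createDStream();
        size_t ret = 0;
        ZSTD_initDStream(zds);
        for (;;) {
            size_t const readSize = fread(inBuf, 1, inCap, fin);
            if (readSize == 0) break;                  /* end of input */
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                ret = ZSTD_decompressStream(zds, &output, &input);
                if (ZSTD_isError(ret)) goto _end;      /* corrupt input, etc. */
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
    _end:
        ZSTD_freeDStream(zds);
        free(inBuf); free(outBuf);
        return ZSTD_isError(ret) ? 1 : 0;
    }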
    @@ -794,7 +857,7 @@ 

    Streaming decompression functions


    If @return == 0, the dictID could not be decoded. This could for one of the following reasons : - The frame does not require a dictionary to be decoded (most common case). - - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information. Note : this use case also happens when using a non-conformant dictionary. - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). - This is not a Zstandard frame. @@ -814,8 +877,9 @@

    Streaming decompression functions


    @result : 0, or an error code (which can be tested with ZSTD_isError()). Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, meaning "return to no-dictionary mode". - Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). + Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + until parameters are reset, a new dictionary is loaded, or the dictionary + is explicitly invalidated by loading a NULL dictionary. Note 2 : Loading a dictionary involves building tables. It's also a CPU consuming operation, with non-negligible impact on latency. Tables are dependent on compression parameters, and for this reason, @@ -828,7 +892,7 @@

    Streaming decompression functions



    size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
    -

    Reference a prepared dictionary, to be used for all next compressed frames. +

    Reference a prepared dictionary, to be used for all future compressed frames. Note that compression parameters are enforced from within CDict, and supersede any compression parameter previously set within CCtx. The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. @@ -863,9 +927,9 @@

    Streaming decompression functions



    size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
    -

    Create an internal DDict from dict buffer, - to be used to decompress next frames. - The dictionary remains valid for all future frames, until explicitly invalidated. +

    Create an internal DDict from dict buffer, to be used to decompress all future frames. + The dictionary remains valid for all future frames, until explicitly invalidated, or + a new dictionary is loaded. @result : 0, or an error code (which can be tested with ZSTD_isError()). Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, meaning "return to no-dictionary mode". @@ -889,9 +953,10 @@

    Streaming decompression functions


    The memory for the table is allocated on the first call to refDDict, and can be freed with ZSTD_freeDCtx(). + If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + will be managed, and referencing a dictionary effectively "discards" any previous one. + @result : 0, or an error code (which can be tested with ZSTD_isError()). - Note 1 : Currently, only one dictionary can be managed. - Referencing a new dictionary effectively "discards" any previous one. Special: referencing a NULL DDict means "return to no-dictionary mode". Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. @@ -1127,20 +1192,74 @@

    Streaming decompression functions


    or an error code (if srcSize is too small)


    +
    ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
    +

    Zstd supports in-place decompression, where the input and output buffers overlap. + In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + and the input buffer must be at the end of the output buffer. + + _______________________ Output Buffer ________________________ + | | + | ____ Input Buffer ____| + | | | + v v v + |---------------------------------------|-----------|----------| + ^ ^ ^ + |___________________ Output_Size ___________________|_ Margin _| + + NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + ZSTD_decompressDCtx(). + NOTE: This function supports multi-frame input. + + @param src The compressed frame(s) + @param srcSize The size of the compressed frame(s) + @returns The decompression margin or an error that can be checked with ZSTD_isError(). + +


    + +
    #define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)(                                              \
    +        ZSTD_FRAMEHEADERSIZE_MAX                                                              /* Frame header */ + \
    +        4                                                                                         /* checksum */ + \
    +        ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
    +        (blockSize)                                                                    /* One block of margin */   \
    +    ))
    +

    Similar to ZSTD_decompressionMargin(), but instead of computing the margin from + the compressed frame, compute it from the original size and the blockSizeLog. + See ZSTD_decompressionMargin() for details. + + WARNING: This macro does not support multi-frame input, the input must be a single + zstd frame. If you need that support use the function, or implement it yourself. + + @param originalSize The original uncompressed size of the data. + @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). + Unless you explicitly set the windowLog smaller than + ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX. + +
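A hedged sketch of the buffer layout described above (the decompressInPlace helper is hypothetical, and it assumes the frame records its content size, i.e. ZSTD_getFrameContentSize() returns neither UNKNOWN nor ERROR): the compressed input is copied to the tail of one oversized buffer, then decompressed in place with a single-pass call.

    #define ZSTD_STATIC_LINKING_ONLY   /* for ZSTD_decompressionMargin() */
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    static size_t decompressInPlace(const void* src, size_t srcSize, void** result)
    {
        size_t const originalSize = (size_t)ZSTD_getFrameContentSize(src, srcSize);
        size_t const margin = ZSTD_decompressionMargin(src, srcSize);
        /* or: ZSTD_DECOMPRESSION_MARGIN(originalSize, ZSTD_BLOCKSIZE_MAX) for a single frame */
        if (ZSTD_isError(margin)) return margin;
        size_t const bufferSize = originalSize + margin;
        char* const buffer = malloc(bufferSize);      /* allocation check omitted */
        /* place the compressed data at the very end of the output buffer */
        memcpy(buffer + bufferSize - srcSize, src, srcSize);
        size_t const dSize = ZSTD_decompress(buffer, originalSize,
                                             buffer + bufferSize - srcSize, srcSize);
        *result = buffer;   /* decompressed data occupies the first dSize bytes */
        return dSize;
    }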


    +
    typedef enum {
       ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
       ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
     } ZSTD_sequenceFormat_e;
     

    -

    Generate sequences using ZSTD_compress2, given a source buffer. +

    ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
    +

    `srcSize` : size of the input buffer + @return : upper-bound for the number of sequences that can be generated + from a buffer of srcSize bytes + + note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + +


    + +

    Generate sequences using ZSTD_compress2(), given a source buffer. Each block will end with a dummy sequence with offset == 0, matchLength == 0, and litLength == length of last literals. litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) simply acts as a block delimiter. - zc can be used to insert custom compression params. - This function invokes ZSTD_compress2 + @zc can be used to insert custom compression params. + This function invokes ZSTD_compress2(). The output of this function can be fed into ZSTD_compressSequences() with CCtx setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters @@ -1150,7 +1269,7 @@

    Streaming decompression functions


    ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
     

    Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - by merging them into into the literals of the next sequence. + by merging them into the literals of the next sequence. As such, the final generated result has no explicit representation of block boundaries, and the final last literals segment is not represented in the sequences. @@ -1161,10 +1280,13 @@

    Streaming decompression functions



    -
    ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
    -                      const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
    -                      const void* src, size_t srcSize);
    -

    Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. +

    ZSTDLIB_STATIC_API size_t
    +ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
    +            const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
    +            const void* src, size_t srcSize);
    +

    Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + @src contains the entire input (not just the literals). + If @srcSize > sum(sequence.length), the remaining bytes are considered all literals If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) The entire source is compressed into a single frame. @@ -1189,7 +1311,7 @@

    Streaming decompression functions


    Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, and cannot emit an RLE block that disagrees with the repcode history - @return : final compressed size or a ZSTD error. + @return : final compressed size, or a ZSTD error code.
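The contrib/seqBench program added earlier in this diff exercises exactly this path; the shortened sketch below (hypothetical wrapper, allocation and error checks omitted) restates it to show how ZSTD_sequenceBound() sizes the sequence array and how ZSTD_c_blockDelimiters must be set before ZSTD_compressSequences() consumes the output of ZSTD_generateSequences():

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    static size_t compressViaSequences(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
    {
        size_t const maxSeqs = ZSTD_sequenceBound(srcSize);   /* upper bound, in sequences */
        ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
        size_t const nbSeqs = ZSTD_generateSequences(zc, seqs, maxSeqs, src, srcSize);
        /* generateSequences emits explicit block delimiters, so declare them */
        ZSTD_CCtx_setParameter(zc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        size_t const cSize = ZSTD_compressSequences(zc, dst, dstCapacity,
                                                    seqs, nbSeqs, src, srcSize);
        free(seqs);
        return cSize;   /* compressed size, or an error code (test with ZSTD_isError()) */
    }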


    @@ -1197,7 +1319,7 @@

    Streaming decompression functions


    const void* src, size_t srcSize, unsigned magicVariant);

    Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. - Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number, + Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. @@ -1253,8 +1375,11 @@

    Streaming decompression functions


    and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. - Note 2 : only single-threaded compression is supported. + Note : only single-threaded compression is supported. ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + + Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + Size estimates assume that no external sequence producer is registered.
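As a sketch of how these estimates are typically consumed, assuming ZSTD_initStaticCCtx() from the same static-linking-only API and single-threaded compression as the notes above require (the makeStaticCCtx helper is hypothetical):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    static ZSTD_CCtx* makeStaticCCtx(int level, void** workspaceOut)
    {
        size_t const need = ZSTD_estimateCCtxSize(level);  /* one-shot, single-threaded estimate */
        void* const workspace = malloc(need);              /* could equally be a static buffer */
        if (workspace == NULL) return NULL;
        ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, need);  /* no further allocation */
        *workspaceOut = workspace;
        return cctx;
    }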


    @@ -1274,7 +1399,12 @@

    Streaming decompression functions


    or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); Note : if streaming is init with function ZSTD_init?Stream_usingDict(), an internal ?Dict will be created, which additional size is not estimated here. - In this case, get total size by adding ZSTD_estimate?DictSize + In this case, get total size by adding ZSTD_estimate?DictSize + Note 2 : only single-threaded compression is supported. + ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + Size estimates assume that no external sequence producer is registered. +


    ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
    @@ -1374,18 +1504,27 @@ 

    Streaming decompression functions


    This function never fails (wide contract)


    +
    ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
    +

    Set all parameters provided within @cparams into the working @cctx. + Note : if modifying parameters during compression (MT mode only), + note that changes to the .windowLog parameter will be ignored. + @return 0 on success, or an error code (can be checked with ZSTD_isError()) +
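A hedged sketch combining ZSTD_CCtx_setCParams() with ZSTD_getCParams() (also from the static-linking-only API) to seed a context from a level/size hint and then adjust a single field; the tuneCParams wrapper and the windowLog value are illustrative only:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static size_t tuneCParams(ZSTD_CCtx* cctx, int level,
                              unsigned long long srcSizeHint, size_t dictSize)
    {
        ZSTD_compressionParameters cparams = ZSTD_getCParams(level, srcSizeHint, dictSize);
        cparams.windowLog = 20;                      /* example adjustment: cap the window at 1 MiB */
        return ZSTD_CCtx_setCParams(cctx, cparams);  /* 0 on success, else an error code */
    }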


    +
    ZSTD_DEPRECATED("use ZSTD_compress2")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
    -                              void* dst, size_t dstCapacity,
    -                        const void* src, size_t srcSize,
    -                        const void* dict,size_t dictSize,
    -                              ZSTD_parameters params);
    +                  void* dst, size_t dstCapacity,
    +            const void* src, size_t srcSize,
    +            const void* dict,size_t dictSize,
    +                  ZSTD_parameters params);
     

    Note : this function is now DEPRECATED. It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. This prototype will generate compilation warnings.


    ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
    @@ -1547,6 +1686,7 @@ 

    Streaming decompression functions



    ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
     

    This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). Instruct the decoder context about what kind of data to decode next. @@ -1573,6 +1713,7 @@

    Streaming decompression functions


    Advanced Streaming compression functions


    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
                  int compressionLevel,
                  unsigned long long pledgedSrcSize);
    @@ -1590,6 +1731,7 @@ 

    Advanced Streaming compression functions


    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
              const void* dict, size_t dictSize,
                    int compressionLevel);
    @@ -1607,6 +1749,7 @@ 

    Advanced Streaming compression functions


    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
             const void* dict, size_t dictSize,
                   ZSTD_parameters params,
    @@ -1628,6 +1771,7 @@ 

    Advanced Streaming compression functions


    ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
     

    This function is DEPRECATED, and equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); @@ -1639,6 +1783,7 @@

    Advanced Streaming compression functions


    ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                        const ZSTD_CDict* cdict,
                              ZSTD_frameParameters fParams,
    @@ -1660,6 +1805,7 @@ 

    Advanced Streaming compression functions


    ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API
     size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
     

    This function is DEPRECATED, and is equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); @@ -1705,32 +1851,32 @@

    Advanced Streaming compression functions


    Advanced Streaming decompression functions


    -
    ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
    +
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
     

    ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); ZSTD_DCtx_loadDictionary(zds, dict, dictSize); note: no dictionary will be used if dict == NULL or dictSize < 8 - Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


    -
    ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
    +
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
     

    ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); ZSTD_DCtx_refDDict(zds, ddict); note : ddict is referenced, it must outlive decompression session - Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


    -
    ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
    +
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
     

    ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); re-use decompression parameters from previous init; saves dictionary loading - Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


    @@ -1748,7 +1894,6 @@

    Advanced Streaming decompression functions

    Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : @@ -1772,8 +1917,9 @@

    Advanced Streaming decompression functions

    Buffer-less streaming compression functions

    ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
     ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
     ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
    -ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
     

    +
    size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
    +

    size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
     

    Buffer-less streaming decompression (synchronous mode)

    @@ -1785,8 +1931,8 @@ 

    Buffer-less streaming compression functions

    ZSTDLIB_S
       Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
       Data fragment must be large enough to ensure successful decoding.
      `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
    -  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
    -           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
    +  result  : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
    +           >0 : `srcSize` is too small, please provide at least result bytes on next attempt.
                errorCode, which can be tested using ZSTD_isError().
     
       It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
    @@ -1805,7 +1951,7 @@ 

    Buffer-less streaming compression functions

    ZSTDLIB_S
     
       The most memory efficient way is to use a round buffer of sufficient size.
       Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
    -  which can @return an error code if required value is too large for current system (in 32-bits mode).
    +  which can return an error code if required value is too large for current system (in 32-bits mode).
       In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
       up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
       which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
    @@ -1825,7 +1971,7 @@ 

    Buffer-less streaming compression functions

    ZSTDLIB_S
       ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
       ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
     
    - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
    +  result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
       It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
       It can also be an error code, which can be tested with ZSTD_isError().
     
    @@ -1856,6 +2002,8 @@ 

    Buffer-less streaming decompression functions

    typedef
         unsigned headerSize;
         unsigned dictID;
         unsigned checksumFlag;
    +    unsigned _reserved1;
    +    unsigned _reserved2;
     } ZSTD_frameHeader;
     

    ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
    @@ -1883,7 +2031,6 @@ 

    Buffer-less streaming decompression functions

    typedef
         - It is necessary to init context before starting
           + compression : any ZSTD_compressBegin*() variant, including with dictionary
           + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
    -      + copyCCtx() and copyDCtx() can be used too
         - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
           + If input is larger than a block size, it's necessary to split input data into multiple blocks
           + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
    @@ -1904,5 +2051,31 @@ 

    Raw zstd block functions

    ZSTDLIB_STATIC_API size_t ZS
     ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
     ZSTDLIB_STATIC_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
     

    +
    ZSTDLIB_STATIC_API void
    +ZSTD_registerSequenceProducer(
    +  ZSTD_CCtx* cctx,
    +  void* sequenceProducerState,
    +  ZSTD_sequenceProducer_F* sequenceProducer
    +);
    +

    Instruct zstd to use a block-level external sequence producer function. + + The sequenceProducerState must be initialized by the caller, and the caller is + responsible for managing its lifetime. This parameter is sticky across + compressions. It will remain set until the user explicitly resets compression + parameters. + + Sequence producer registration is considered to be an "advanced parameter", + part of the "advanced API". This means it will only have an effect on compression + APIs which respect advanced parameters, such as compress2() and compressStream2(). + Older compression APIs such as compressCCtx(), which predate the introduction of + "advanced parameters", will ignore any external sequence producer setting. + + The sequence producer can be "cleared" by registering a NULL function pointer. This + removes all limitations described above in the "LIMITATIONS" section of the API docs. + + The user is strongly encouraged to read the full API documentation (above) before + calling this function. +


    + diff --git a/examples/Makefile b/examples/Makefile index 8d7361dd867..31f52d35763 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/examples/common.h b/examples/common.h index 5f45b340666..4873e877a7a 100644 --- a/examples/common.h +++ b/examples/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,6 +21,17 @@ #include // stat #include + +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ +#if defined(__GNUC__) +# define UNUSED_ATTR __attribute__((unused)) +#else +# define UNUSED_ATTR +#endif + +#define HEADER_FUNCTION static UNUSED_ATTR + + /* * Define the returned error code from utility functions. */ @@ -68,7 +79,7 @@ typedef enum { * * @return The size of a given file path. */ -static size_t fsize_orDie(const char *filename) +HEADER_FUNCTION size_t fsize_orDie(const char *filename) { struct stat st; if (stat(filename, &st) != 0) { @@ -96,7 +107,7 @@ static size_t fsize_orDie(const char *filename) * @return If successful this function will return a FILE pointer to an * opened file otherwise it sends an error to stderr and exits. */ -static FILE* fopen_orDie(const char *filename, const char *instruction) +HEADER_FUNCTION FILE* fopen_orDie(const char *filename, const char *instruction) { FILE* const inFile = fopen(filename, instruction); if (inFile) return inFile; @@ -108,7 +119,7 @@ static FILE* fopen_orDie(const char *filename, const char *instruction) /*! fclose_orDie() : * Close an opened file using given FILE pointer. */ -static void fclose_orDie(FILE* file) +HEADER_FUNCTION void fclose_orDie(FILE* file) { if (!fclose(file)) { return; }; /* error */ @@ -123,7 +134,7 @@ static void fclose_orDie(FILE* file) * * @return The number of bytes read. */ -static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +HEADER_FUNCTION size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) { size_t const readSize = fread(buffer, 1, sizeToRead, file); if (readSize == sizeToRead) return readSize; /* good */ @@ -143,7 +154,7 @@ static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) * * @return The number of bytes written. */ -static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +HEADER_FUNCTION size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) { size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ @@ -159,7 +170,7 @@ static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) * cated memory. If there is an error, this function will send that * error to stderr and exit. */ -static void* malloc_orDie(size_t size) +HEADER_FUNCTION void* malloc_orDie(size_t size) { void* const buff = malloc(size); if (buff) return buff; @@ -177,7 +188,7 @@ static void* malloc_orDie(size_t size) * @return If successful this function will load file into buffer and * return file size, otherwise it will printout an error to stderr and exit. 
*/ -static size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSize) +HEADER_FUNCTION size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSize) { size_t const fileSize = fsize_orDie(fileName); CHECK(fileSize <= bufferSize, "File too large!"); @@ -201,7 +212,8 @@ static size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSi * @return If successful this function will return buffer and bufferSize(=fileSize), * otherwise it will printout an error to stderr and exit. */ -static void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) { +HEADER_FUNCTION void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) +{ size_t const fileSize = fsize_orDie(fileName); *bufferSize = fileSize; void* const buffer = malloc_orDie(*bufferSize); @@ -217,7 +229,7 @@ static void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) { * Note: This function will send an error to stderr and exit if it * cannot write to a given file. */ -static void saveFile_orDie(const char* fileName, const void* buff, size_t buffSize) +HEADER_FUNCTION void saveFile_orDie(const char* fileName, const void* buff, size_t buffSize) { FILE* const oFile = fopen_orDie(fileName, "wb"); size_t const wSize = fwrite(buff, 1, buffSize, oFile); diff --git a/examples/dictionary_compression.c b/examples/dictionary_compression.c index 0eee6508e74..83edc1cad9b 100644 --- a/examples/dictionary_compression.c +++ b/examples/dictionary_compression.c @@ -1,12 +1,22 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. - */ +**/ + +/* This example deals with Dictionary compression, + * its counterpart is `examples/dictionary_decompression.c` . + * These examples presume that a dictionary already exists. + * The main method to create a dictionary is `zstd --train`, + * look at the CLI documentation for details. + * Another possible method is to employ dictionary training API, + * published in `lib/zdict.h` . +**/ + #include // printf #include // free #include // memset, strcat @@ -14,7 +24,7 @@ #include "common.h" // Helper functions, CHECK(), and CHECK_ZSTD() /* createDict() : - `dictFileName` is supposed to have been created using `zstd --train` */ +** `dictFileName` is supposed already created using `zstd --train` */ static ZSTD_CDict* createCDict_orDie(const char* dictFileName, int cLevel) { size_t dictSize; diff --git a/examples/dictionary_decompression.c b/examples/dictionary_decompression.c index 107cfc1ee1a..e6c999964a2 100644 --- a/examples/dictionary_decompression.c +++ b/examples/dictionary_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/multiple_simple_compression.c b/examples/multiple_simple_compression.c index 5d2a28fcdca..bf77ca13317 100644 --- a/examples/multiple_simple_compression.c +++ b/examples/multiple_simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/multiple_streaming_compression.c b/examples/multiple_streaming_compression.c index d4efc8e5773..b12ad03dce1 100644 --- a/examples/multiple_streaming_compression.c +++ b/examples/multiple_streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/simple_compression.c b/examples/simple_compression.c index 27a65b17f50..7c880725fc7 100644 --- a/examples/simple_compression.c +++ b/examples/simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/simple_decompression.c b/examples/simple_decompression.c index 59c1fd414aa..f499156f64e 100644 --- a/examples/simple_decompression.c +++ b/examples/simple_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_compression.c b/examples/streaming_compression.c index ff1875829ea..ed0a3a69cd9 100644 --- a/examples/streaming_compression.c +++ b/examples/streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_compression_thread_pool.c b/examples/streaming_compression_thread_pool.c index 21cb3d54999..a1a024129f2 100644 --- a/examples/streaming_compression_thread_pool.c +++ b/examples/streaming_compression_thread_pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Martin Liska, SUSE, Facebook, Inc. + * Copyright (c) Martin Liska, SUSE, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_decompression.c b/examples/streaming_decompression.c index 6dc4c22677b..95fa1122773 100644 --- a/examples/streaming_decompression.c +++ b/examples/streaming_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_memory_usage.c b/examples/streaming_memory_usage.c index a5219ef1e47..957acb61a39 100644 --- a/examples/streaming_memory_usage.c +++ b/examples/streaming_memory_usage.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/Makefile b/lib/Makefile index ef202183d84..a4cf61ab10e 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -298,7 +298,7 @@ INSTALL_DATA ?= $(INSTALL) -m 644 libzstd.pc: libzstd.pc.in @echo creating pkgconfig - @sed $(SED_ERE_OPT) \ + @sed \ -e 's|@PREFIX@|$(PREFIX)|' \ -e 's|@EXEC_PREFIX@|$(PCEXEC_PREFIX)|' \ -e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \ diff --git a/lib/README.md b/lib/README.md index 4c9d8f05912..c3b5d1817f3 100644 --- a/lib/README.md +++ b/lib/README.md @@ -91,7 +91,7 @@ The file structure is designed to make this selection manually achievable for an `ZSTD_LIB_COMPRESSION, ZSTD_LIB_DECOMPRESSION`, `ZSTD_LIB_DICTBUILDER`, and `ZSTD_LIB_DEPRECATED` as `0` to forgo compilation of the corresponding features. This will also disable compilation of all - dependencies (eg. `ZSTD_LIB_COMPRESSION=0` will also disable + dependencies (e.g. `ZSTD_LIB_COMPRESSION=0` will also disable dictBuilder). - There are a number of options that can help minimize the binary size of @@ -161,6 +161,13 @@ The file structure is designed to make this selection manually achievable for an `ZSTD_DCtx` decompression contexts, but might also result in a small decompression speed cost. +- The C compiler macros `ZSTDLIB_VISIBLE`, `ZSTDERRORLIB_VISIBLE` and `ZDICTLIB_VISIBLE` + can be overridden to control the visibility of zstd's API. Additionally, + `ZSTDLIB_STATIC_API` and `ZDICTLIB_STATIC_API` can be overridden to control the visibility + of zstd's static API. Specifically, it can be set to `ZSTDLIB_HIDDEN` to hide the symbols + from the shared library. These macros default to `ZSTDLIB_VISIBILITY`, + `ZSTDERRORLIB_VSIBILITY`, and `ZDICTLIB_VISIBILITY` if unset, for backwards compatibility + with the old macro names. #### Windows : using MinGW+MSYS to create DLL diff --git a/lib/common/bits.h b/lib/common/bits.h new file mode 100644 index 00000000000..7939f3d0f32 --- /dev/null +++ b/lib/common/bits.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
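Regarding the visibility macros documented above in lib/README.md: the usual way to override them is to define the macro before zstd's headers are seen, for example with -D on the compiler command line. A hedged illustration (the actual macro expansion lives in zstd.h, which is not part of this diff):

    /* Hide the stable API when embedding zstd privately inside a larger
     * shared object; equivalent to compiling with
     * -DZSTDLIB_VISIBLE=ZSTDLIB_HIDDEN on GCC/Clang. */
    #define ZSTDLIB_VISIBLE ZSTDLIB_HIDDEN
    #include <zstd.h>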
+ */ + +#ifndef ZSTD_BITS_H +#define ZSTD_BITS_H + +#include "mem.h" + +MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val) +{ + assert(val != 0); + { + static const int DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3, + 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, + 26, 12, 18, 6, 11, 5, 10, 9}; + return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27]; + } +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val) +{ + assert(val != 0); +# if defined(_MSC_VER) +# if STATIC_BMI2 == 1 + return _tzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_ctz(val); +# else + return ZSTD_countTrailingZeros32_fallback(val); +# endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) { + assert(val != 0); + { + static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31}; + val |= val >> 1; + val |= val >> 2; + val |= val >> 4; + val |= val >> 8; + val |= val >> 16; + return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27]; + } +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val) +{ + assert(val != 0); +# if defined(_MSC_VER) +# if STATIC_BMI2 == 1 + return _lzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)(31 - r); + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_clz(val); +# else + return ZSTD_countLeadingZeros32_fallback(val); +# endif +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val) +{ + assert(val != 0); +# if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 == 1 + return _tzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__) + return (unsigned)__builtin_ctzll(val); +# else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (leastSignificantWord == 0) { + return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); + } else { + return ZSTD_countTrailingZeros32(leastSignificantWord); + } + } +# endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val) +{ + assert(val != 0); +# if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 == 1 + return _lzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, val); + return (unsigned)(63 - r); + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)(__builtin_clzll(val)); +# else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (mostSignificantWord == 0) { + return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); + } else { + return ZSTD_countLeadingZeros32(mostSignificantWord); + } + } +# endif +} + +MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { + return ZSTD_countTrailingZeros64((U64)val) >> 3; + } else { + return ZSTD_countTrailingZeros32((U32)val) >> 3; + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { + return 
ZSTD_countLeadingZeros64((U64)val) >> 3; + } else { + return ZSTD_countLeadingZeros32((U32)val) >> 3; + } + } +} + +MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ +{ + assert(val != 0); + return 31 - ZSTD_countLeadingZeros32(val); +} + +#endif /* ZSTD_BITS_H */ diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h index 84b6062ff35..db1b4cf1369 100644 --- a/lib/common/bitstream.h +++ b/lib/common/bitstream.h @@ -1,7 +1,7 @@ /* ****************************************************************** * bitstream * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -30,14 +30,15 @@ extern "C" { #include "compiler.h" /* UNLIKELY() */ #include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ #include "error_private.h" /* error codes and messages */ +#include "bits.h" /* ZSTD_highbit32 */ /*========================================= * Target specific =========================================*/ #ifndef ZSTD_NO_INTRINSICS -# if defined(__BMI__) && defined(__GNUC__) -# include /* support for bextr (experimental) */ +# if (defined(__BMI__) || defined(__BMI2__)) && defined(__GNUC__) +# include /* support for bextr (experimental)/bzhi */ # elif defined(__ICCARM__) # include # endif @@ -132,48 +133,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ - - -/*-************************************************************** -* Internal functions -****************************************************************/ -MEM_STATIC unsigned BIT_highbit32 (U32 val) -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ -# if STATIC_BMI2 == 1 - return _lzcnt_u32(val) ^ 31; -# else - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return __builtin_clz (val) ^ 31; -# elif defined(__ICCARM__) /* IAR Intrinsic */ - return 31 - __CLZ(val); -# else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, - 11, 14, 16, 18, 22, 25, 3, 30, - 8, 12, 20, 28, 15, 17, 24, 7, - 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; -# endif - } -} - /*===== Local Constants =====*/ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, @@ -203,6 +162,16 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, return 0; } +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +{ +#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS) + return _bzhi_u64(bitContainer, nbBits); +#else + assert(nbBits < BIT_MASK_SIZE); + return bitContainer & BIT_mask[nbBits]; +#endif +} + /*! BIT_addBits() : * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! 
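The contracts of the new lib/common/bits.h helpers above are: ZSTD_countTrailingZeros32/64 and ZSTD_countLeadingZeros32/64 require a non-zero argument, and ZSTD_highbit32(v) returns the index of the highest set bit (31 minus the leading-zero count). A small illustrative check, not part of the diff:

    #include <assert.h>
    #include "bits.h"   /* assumes lib/common is on the include path */

    int main(void)
    {
        assert(ZSTD_countTrailingZeros32(8)          == 3);
        assert(ZSTD_countLeadingZeros32(1)           == 31);
        assert(ZSTD_countTrailingZeros64(1ULL << 40) == 40);
        assert(ZSTD_highbit32(1)                     == 0);
        assert(ZSTD_highbit32(0x80000000u)           == 31);
        return 0;
    }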
*/ @@ -212,7 +181,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); - bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos; bitC->bitPos += nbBits; } @@ -291,7 +260,7 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } } else { bitD->ptr = bitD->start; @@ -319,7 +288,7 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; @@ -350,16 +319,6 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c #endif } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) -{ -#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 - return _bzhi_u64(bitContainer, nbBits); -#else - assert(nbBits < BIT_MASK_SIZE); - return bitContainer & BIT_mask[nbBits]; -#endif -} - /*! BIT_lookBits() : * Provides next n bits from local register. * local register is not modified. @@ -406,7 +365,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n } /*! BIT_readBitsFast() : - * unsafe version; only works only if nbBits >= 1 */ + * unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { size_t const value = BIT_lookBitsFast(bitD, nbBits); diff --git a/lib/common/compiler.h b/lib/common/compiler.h index 516930c01ec..d4f2f285d79 100644 --- a/lib/common/compiler.h +++ b/lib/common/compiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -165,6 +165,12 @@ #define UNLIKELY(x) (x) #endif +#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) +# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); } +#else +# define ZSTD_UNREACHABLE { assert(0); } +#endif + /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ # include /* For Visual 2005 */ @@ -181,6 +187,8 @@ # ifdef __AVX2__ //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 # define STATIC_BMI2 1 # endif +# elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__) +# define STATIC_BMI2 1 # endif #endif @@ -273,7 +281,18 @@ * Sanitizer *****************************************************************/ -#if ZSTD_MEMORY_SANITIZER +/* Issue #3240 reports an ASAN failure on an llvm-mingw build. 
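The ZSTD_UNREACHABLE macro added above to lib/common/compiler.h combines an assert for debug builds with __builtin_unreachable() (when available) so the optimizer can discard impossible paths in release builds. A hedged usage sketch; the function below is illustrative and not taken from the codebase:

    /* assumes compiler.h (which defines ZSTD_UNREACHABLE) and assert are available */
    static unsigned nibbleCount(unsigned width)   /* width is always 8, 16 or 32 here */
    {
        switch (width) {
            case 8:  return 2;
            case 16: return 4;
            case 32: return 8;
            default: ZSTD_UNREACHABLE;   /* asserts in debug, optimizer hint in release */
        }
    }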
Out of an + * abundance of caution, disable our custom poisoning on mingw. */ +#ifdef __MINGW32__ +#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE +#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1 +#endif +#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE +#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1 +#endif +#endif + +#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) /* Not all platforms that support msan provide sanitizers/msan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ @@ -294,7 +313,7 @@ void __msan_poison(const volatile void *a, size_t size); intptr_t __msan_test_shadow(const volatile void *x, size_t size); #endif -#if ZSTD_ADDRESS_SANITIZER +#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Not all platforms that support asan provide sanitizers/asan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ diff --git a/lib/common/cpu.h b/lib/common/cpu.h index 8acd33be3cd..8bc34a36da2 100644 --- a/lib/common/cpu.h +++ b/lib/common/cpu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/common/debug.c b/lib/common/debug.c index bb863c9ea61..ebf7bfccfa6 100644 --- a/lib/common/debug.c +++ b/lib/common/debug.c @@ -1,7 +1,7 @@ /* ****************************************************************** * debug * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/lib/common/debug.h b/lib/common/debug.h index 3b2a320a188..0e9817ea6d6 100644 --- a/lib/common/debug.h +++ b/lib/common/debug.h @@ -1,7 +1,7 @@ /* ****************************************************************** * debug * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/lib/common/entropy_common.c b/lib/common/entropy_common.c index 4229b40c5ee..e2173afb0a8 100644 --- a/lib/common/entropy_common.c +++ b/lib/common/entropy_common.c @@ -1,6 +1,6 @@ /* ****************************************************************** * Common functions of New Generation Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
* * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -19,8 +19,8 @@ #include "error_private.h" /* ERR_*, ERROR */ #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ #include "fse.h" -#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ #include "huf.h" +#include "bits.h" /* ZSDT_highbit32, ZSTD_countTrailingZeros32 */ /*=== Version ===*/ @@ -38,34 +38,6 @@ const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); } /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ -static U32 FSE_ctz(U32 val) -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ - if (val != 0) { - unsigned long r; - _BitScanForward(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ - return __builtin_ctz(val); -# elif defined(__ICCARM__) /* IAR Intrinsic */ - return __CTZ(val); -# else /* Software version */ - U32 count = 0; - while ((val & 1) == 0) { - val >>= 1; - ++count; - } - return count; -# endif - } -} - FORCE_INLINE_TEMPLATE size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) @@ -113,7 +85,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne * repeat. * Avoid UB by setting the high bit to 1. */ - int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1; + int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1; while (repeats >= 12) { charnum += 3 * 12; if (LIKELY(ip <= iend-7)) { @@ -124,7 +96,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne ip = iend - 4; } bitStream = MEM_readLE32(ip) >> bitCount; - repeats = FSE_ctz(~bitStream | 0x80000000) >> 1; + repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1; } charnum += 3 * repeats; bitStream >>= 2 * repeats; @@ -189,7 +161,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne * know that threshold > 1. 
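In the FSE_readNCount hunks above, the local FSE_ctz() is deleted and the shared ZSTD_countTrailingZeros32() takes its place; the computation itself is unchanged. Each "repeat" marker in the NCount stream is a 2-bit field equal to 0b11, so counting the run of low 1-bits in bitStream and halving it gives the number of markers. A worked example with illustrative values:

    /* low bits of bitStream: ... 11 11 11  -> three repeat markers */
    U32 const bitStream = 0x3F;
    int const repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
    /* ~0x3F = 0xFFFFFFC0 has 6 trailing zero bits, so repeats == 3 */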
*/ if (remaining <= 1) break; - nbBits = BIT_highbit32(remaining) + 1; + nbBits = ZSTD_highbit32(remaining) + 1; threshold = 1 << (nbBits - 1); } if (charnum >= maxSV1) break; @@ -264,7 +236,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, const void* src, size_t srcSize) { U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; - return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0); + return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0); } FORCE_INLINE_TEMPLATE size_t @@ -312,14 +284,14 @@ HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats, if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ - { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + { U32 const tableLog = ZSTD_highbit32(weightTotal) + 1; if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); *tableLogPtr = tableLog; /* determine last weight */ { U32 const total = 1 << tableLog; U32 const rest = total - weightTotal; - U32 const verif = 1 << BIT_highbit32(rest); - U32 const lastWeight = BIT_highbit32(rest) + 1; + U32 const verif = 1 << ZSTD_highbit32(rest); + U32 const lastWeight = ZSTD_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; @@ -356,13 +328,13 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, - int bmi2) + int flags) { #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } #endif - (void)bmi2; + (void)flags; return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } diff --git a/lib/common/error_private.c b/lib/common/error_private.c index 6d1135f8c37..075fc5ef42f 100644 --- a/lib/common/error_private.c +++ b/lib/common/error_private.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -27,9 +27,11 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; - case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(corruption_detected): return "Data corruption detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; @@ -38,17 +40,22 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; + case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full"; + case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; + case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code"; + case PREFIX(externalSequences_invalid): return "External sequences are not valid"; case PREFIX(maxCode): default: return notErrorCode; } diff --git a/lib/common/error_private.h b/lib/common/error_private.h index 007d81066ab..325daad404b 100644 --- a/lib/common/error_private.h +++ b/lib/common/error_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
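The new entries added above to ERR_getErrorString() (literals_headerWrong, stabilityCondition_notRespected, the noForwardProgress_* pair, sequenceProducer_failed, externalSequences_invalid) are the strings callers ultimately see through the public error helpers. A hedged sketch of how they typically surface:

    #include <stdio.h>
    #include <zstd.h>

    /* Illustrative: report any zstd error code as a human-readable string. */
    static void reportIfError(size_t ret)
    {
        if (ZSTD_isError(ret))
            fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(ret));
    }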
* * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/common/fse.h b/lib/common/fse.h index 714bfd3e7f2..02a1f0bc530 100644 --- a/lib/common/fse.h +++ b/lib/common/fse.h @@ -1,7 +1,7 @@ /* ****************************************************************** * FSE : Finite State Entropy codec * Public Prototypes declaration - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -53,34 +53,6 @@ extern "C" { FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ -/*-**************************************** -* FSE simple functions -******************************************/ -/*! FSE_compress() : - Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. - 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). - @return : size of compressed data (<= dstCapacity). - Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. - if FSE_isError(return), compression failed (more details using FSE_getErrorName()) -*/ -FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/*! FSE_decompress(): - Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', - into already allocated destination buffer 'dst', of size 'dstCapacity'. - @return : size of regenerated data (<= maxDstSize), - or an error code, which can be tested using FSE_isError() . - - ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! - Why ? : making this distinction requires a header. - Header management is intentionally delegated to the user layer, which can better manage special cases. -*/ -FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, - const void* cSrc, size_t cSrcSize); - - /*-***************************************** * Tool functions ******************************************/ @@ -91,20 +63,6 @@ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ -/*-***************************************** -* FSE advanced functions -******************************************/ -/*! FSE_compress2() : - Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' - Both parameters can be defined as '0' to mean : use default value - @return : size of compressed data - Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. - if FSE_isError(return), it's an error code. -*/ -FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); - - /*-***************************************** * FSE detailed API ******************************************/ @@ -164,8 +122,6 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. 
It's only meant to be more restrictive than void* */ -FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); -FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). @@ -241,23 +197,7 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize, int bmi2); -/*! Constructor and Destructor of FSE_DTable. - Note that its size depends on 'tableLog' */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); -FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); - -/*! FSE_buildDTable(): - Builds 'dt', which must be already allocated, using FSE_createDTable(). - return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_decompress_usingDTable(): - Decompress compressed source `cSrc` of size `cSrcSize` using `dt` - into `dst` which must be already allocated. - @return : size of regenerated data (necessarily <= `dstCapacity`), - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! Tutorial : @@ -320,16 +260,6 @@ If there is an error, the function will return an error code, which can be teste unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); /**< same as FSE_optimalTableLog(), which used `minus==2` */ -/* FSE_compress_wksp() : - * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). - * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. - */ -#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? 
(1 << (maxTableLog - 2)) : 1024) ) -size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); - -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ - size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /**< build a fake FSE_CTable, designed to compress always the same symbolValue */ @@ -347,19 +277,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */ -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits); -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */ - -size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue); -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */ - -#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) #define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize); -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */ - size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2); -/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */ +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`. + * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */ typedef enum { FSE_repeat_none, /**< Cannot use the previous table */ @@ -555,7 +477,7 @@ MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePt /* FSE_getMaxNbBits() : * Approximate maximum cost of a symbol, in bits. - * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) + * Fractional get rounded up (i.e. 
a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) diff --git a/lib/common/fse_decompress.c b/lib/common/fse_decompress.c index a5a358015fc..1e1c9f92d6b 100644 --- a/lib/common/fse_decompress.c +++ b/lib/common/fse_decompress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * FSE : Finite State Entropy decoder - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -24,6 +24,7 @@ #include "error_private.h" #define ZSTD_DEPS_NEED_MALLOC #include "zstd_deps.h" +#include "bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -55,19 +56,6 @@ #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) - -/* Function templates */ -FSE_DTable* FSE_createDTable (unsigned tableLog) -{ - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); -} - -void FSE_freeDTable (FSE_DTable* dt) -{ - ZSTD_free(dt); -} - static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ @@ -127,10 +115,10 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo } } /* Now we spread those positions across the table. - * The benefit of doing it in two stages is that we avoid the the + * The benefit of doing it in two stages is that we avoid the * variable size inner loop, which caused lots of branch misses. * Now we can run through all the positions without any branch misses. - * We unroll the loop twice, since that is what emperically worked best. + * We unroll the loop twice, since that is what empirically worked best. 
*/ { size_t position = 0; @@ -166,7 +154,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo for (u=0; utableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - - -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - void* dPtr = dt + 1; - FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSV1 = tableMask+1; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s=0; sfastMode; - - /* select fast mode (static) */ - if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - - -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) -{ - return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0); -} - typedef struct { short ncount[FSE_MAX_SYMBOL_VALUE + 1]; FSE_DTable dtable[1]; /* Dynamically sized */ @@ -342,7 +267,8 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( } if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge); - workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog); + assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize); + workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) ); @@ -382,22 +308,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); } - -typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { - U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)]; - return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp)); -} - -size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) -{ - /* Static analyzer seems unable to understand this table will be properly initialized later */ - U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp)); -} -#endif - - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/common/huf.h b/lib/common/huf.h index 85518481ec6..73d1ee56543 100644 --- a/lib/common/huf.h +++ b/lib/common/huf.h @@ -1,7 +1,7 @@ /* ****************************************************************** * huff0 huffman codec, * part of Finite State Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
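With FSE_decompress(), FSE_buildDTable() and the other convenience entry points removed above, the externally-allocated-workspace variant is what remains, and the deleted FSE_decompress() body shown in the hunk is essentially the recipe for calling it. A hedged sketch, assuming the destination and source buffers already exist:

    /* Workspace sized by the macro from fse.h; bmi2=0 selects the portable path. */
    U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    size_t const r = FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize,
                                              FSE_MAX_TABLELOG,
                                              wksp, sizeof(wksp), /* bmi2 */ 0);
    if (FSE_isError(r)) { /* handle error */ }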
* * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -21,99 +21,22 @@ extern "C" { /* *** Dependencies *** */ #include "zstd_deps.h" /* size_t */ - - -/* *** library symbols visibility *** */ -/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, - * HUF symbols remain "private" (internal symbols for library only). - * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define HUF_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ -#else -# define HUF_PUBLIC_API -#endif - - -/* ========================== */ -/* *** simple functions *** */ -/* ========================== */ - -/** HUF_compress() : - * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. - * 'dst' buffer must be already allocated. - * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). - * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. - * @return : size of compressed data (<= `dstCapacity`). - * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) - */ -HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/** HUF_decompress() : - * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', - * into already allocated buffer 'dst', of minimum size 'dstSize'. - * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. - * Note : in contrast with FSE, HUF_decompress can regenerate - * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, - * because it knows size to regenerate (originalSize). - * @return : size of regenerated data (== originalSize), - * or an error code, which can be tested using HUF_isError() - */ -HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize); +#include "mem.h" /* U32 */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" /* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ -HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ -HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ +unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ -/* *** Advanced function *** */ - -/** HUF_compress2() : - * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. 
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . - * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ -HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog); - -/** HUF_compress4X_wksp() : - * Same as HUF_compress2(), but uses externally allocated `workSpace`. - * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */ #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) -HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize); - -#endif /* HUF_H_298734234 */ - -/* ****************************************************************** - * WARNING !! - * The following section contains advanced and experimental definitions - * which shall never be used in the context of a dynamic library, - * because they are not guaranteed to remain stable in the future. - * Only consider them in association with static linking. - * *****************************************************************/ -#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) -#define HUF_H_HUF_STATIC_LINKING_ONLY - -/* *** Dependencies *** */ -#include "mem.h" /* U32 */ -#define FSE_STATIC_LINKING_ONLY -#include "fse.h" - /* *** Constants *** */ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ @@ -154,25 +77,49 @@ typedef U32 HUF_DTable; /* **************************************** * Advanced decompression functions ******************************************/ -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -#endif -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif +/** + * Huffman flags bitset. + * For all flags, 0 is the default value. + */ +typedef enum { + /** + * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. + * Otherwise: Ignored. 
+ */ + HUF_flags_bmi2 = (1 << 0), + /** + * If set: Test possible table depths to find the one that produces the smallest header + encoded size. + * If unset: Use heuristic to find the table depth. + */ + HUF_flags_optimalDepth = (1 << 1), + /** + * If set: If the previous table can encode the input, always reuse the previous table. + * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. + */ + HUF_flags_preferRepeat = (1 << 2), + /** + * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. + * If unset: Always histogram the entire input. + */ + HUF_flags_suspectUncompressible = (1 << 3), + /** + * If set: Don't use assembly implementations + * If unset: Allow using assembly implementations + */ + HUF_flags_disableAsm = (1 << 4), + /** + * If set: Don't use the fast decoding loop, always use the fallback decoding loop. + * If unset: Use the fast decoding loop when possible. + */ + HUF_flags_disableFast = (1 << 5) +} HUF_flags_e; /* **************************************** * HUF detailed API * ****************************************/ +#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra /*! HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") @@ -185,12 +132,12 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +unsigned HUF_minTableLog(unsigned symbolCardinality); +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue); +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace, + size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); -size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); @@ -199,6 +146,7 @@ typedef enum { HUF_repeat_check, /**< Can use the previous table but it must be checked. 
Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ } HUF_repeat; + /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. @@ -209,13 +157,13 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. */ -#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, @@ -241,7 +189,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workspace, size_t wkspSize, - int bmi2); + int flags); /** HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ @@ -279,32 +227,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif - /* ====================== */ /* single stream variants */ /* ====================== */ -size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */ -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); -size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int 
bmi2); +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. @@ -315,49 +243,30 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); - -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ -#endif - -size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); -size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */ #endif /* BMI2 variants. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. 
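Across the huf.h hunks above, the separate bmi2 / preferRepeat / suspectUncompressible parameters collapse into a single int flags argument built from the HUF_flags_e bitset. A hedged sketch of assembling such a flags word; the two condition variables are illustrative:

    int flags = 0;
    if (cpuHasBmi2)      flags |= HUF_flags_bmi2;           /* use BMI2 code paths   */
    if (tuneTableDepth)  flags |= HUF_flags_optimalDepth;   /* search for best depth */
    /* then pass `flags` to e.g.
     * HUF_compress4X_usingCTable(dst, dstSize, src, srcSize, cTable, flags); */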
*/ -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif -#endif /* HUF_STATIC_LINKING_ONLY */ +#endif /* HUF_H_298734234 */ #if defined (__cplusplus) } diff --git a/lib/common/mem.h b/lib/common/mem.h index 85581c38478..98dd47a0476 100644 --- a/lib/common/mem.h +++ b/lib/common/mem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -133,21 +133,15 @@ MEM_STATIC size_t MEM_swapST(size_t in); /*-************************************************************** * Memory I/O Implementation *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. +/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory: + * Method 0 : always use `memcpy()`. Safe and portable. + * Method 1 : Use compiler extension to set unaligned access. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (i.e. 
GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) + * Default : method 1 if supported, else method 0 */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) +# ifdef __GNUC__ # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif @@ -190,30 +184,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) - __pragma( pack(push, 1) ) - typedef struct { U16 v; } unalign16; - typedef struct { U32 v; } unalign32; - typedef struct { U64 v; } unalign64; - typedef struct { size_t v; } unalignArch; - __pragma( pack(pop) ) -#else - typedef struct { U16 v; } __attribute__((packed)) unalign16; - typedef struct { U32 v; } __attribute__((packed)) unalign32; - typedef struct { U64 v; } __attribute__((packed)) unalign64; - typedef struct { size_t v; } __attribute__((packed)) unalignArch; -#endif +typedef __attribute__((aligned(1))) U16 unalign16; +typedef __attribute__((aligned(1))) U32 unalign32; +typedef __attribute__((aligned(1))) U64 unalign64; +typedef __attribute__((aligned(1))) size_t unalignArch; -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } -MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } +MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; } -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; } #else @@ -257,6 +240,14 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) #endif /* MEM_FORCE_MEMORY_ACCESS */ +MEM_STATIC U32 MEM_swap32_fallback(U32 in) +{ + return ((in << 24) & 0xff000000 ) | + ((in << 8) & 0x00ff0000 ) | + ((in >> 8) & 0x0000ff00 ) | + ((in >> 24) & 0x000000ff ); +} + MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -265,22 +256,13 @@ MEM_STATIC U32 MEM_swap32(U32 in) || (defined(__clang__) && __has_builtin(__builtin_bswap32)) return __builtin_bswap32(in); #else - return ((in << 24) & 0xff000000 ) | - ((in << 8) & 0x00ff0000 ) | - ((in >> 8) & 0x0000ff00 ) | - ((in >> 24) & 0x000000ff ); + return MEM_swap32_fallback(in); #endif } -MEM_STATIC U64 MEM_swap64(U64 in) +MEM_STATIC U64 MEM_swap64_fallback(U64 in) { -#if defined(_MSC_VER) /* 
Visual Studio */ - return _byteswap_uint64(in); -#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ - || (defined(__clang__) && __has_builtin(__builtin_bswap64)) - return __builtin_bswap64(in); -#else - return ((in << 56) & 0xff00000000000000ULL) | + return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 0x000000ff00000000ULL) | @@ -288,6 +270,17 @@ MEM_STATIC U64 MEM_swap64(U64 in) ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_uint64(in); +#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ + || (defined(__clang__) && __has_builtin(__builtin_bswap64)) + return __builtin_bswap64(in); +#else + return MEM_swap64_fallback(in); #endif } diff --git a/lib/common/pool.c b/lib/common/pool.c index 2e37cdd73c8..f3d9d08547c 100644 --- a/lib/common/pool.c +++ b/lib/common/pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,7 @@ /* ====== Dependencies ======= */ #include "zstd_deps.h" /* size_t */ #include "debug.h" /* assert */ -#include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */ +#include "zstd_internal.h" /* ZSTD_customCalloc, ZSTD_customFree */ #include "pool.h" /* ====== Compiler specifics ====== */ @@ -96,9 +96,7 @@ static void* POOL_thread(void* opaque) { /* If the intended queue size was 0, signal after finishing job */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; - if (ctx->queueSize == 1) { - ZSTD_pthread_cond_signal(&ctx->queuePushCond); - } + ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } } /* for (;;) */ @@ -128,7 +126,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, * empty and full queues. */ ctx->queueSize = queueSize + 1; - ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem); + ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; @@ -142,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, } ctx->shutdown = 0; /* Allocate space for the thread handles */ - ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); + ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; ctx->customMem = customMem; /* Check for errors */ @@ -175,7 +173,7 @@ static void POOL_join(POOL_ctx* ctx) { /* Join all of the threads */ { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { - ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ + ZSTD_pthread_join(ctx->threads[i]); /* note : could fail */ } } } @@ -190,6 +188,17 @@ void POOL_free(POOL_ctx *ctx) { ZSTD_customFree(ctx, ctx->customMem); } +/*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. 
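For the MEM_swap32_fallback() helper factored out in the mem.h hunk above, a quick sanity check of the shift/mask arithmetic, using an assumed test value:

    /* 0x12345678 byte-reversed is 0x78563412, independent of host endianness:
     *   (x << 24) & 0xff000000 = 0x78000000
     *   (x <<  8) & 0x00ff0000 = 0x00560000
     *   (x >>  8) & 0x0000ff00 = 0x00003400
     *   (x >> 24) & 0x000000ff = 0x00000012   -> OR of the four = 0x78563412 */
    assert(MEM_swap32_fallback(0x12345678u) == 0x78563412u);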
+ */ +void POOL_joinJobs(POOL_ctx* ctx) { + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) { + ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + } + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); +} + void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { POOL_free (pool); } @@ -211,7 +220,7 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) return 0; } /* numThreads > threadCapacity */ - { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); + { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); if (!threadPool) return 1; /* replace existing thread pool */ ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); @@ -262,7 +271,9 @@ static int isQueueFull(POOL_ctx const* ctx) { static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { - POOL_job const job = {function, opaque}; + POOL_job job; + job.function = function; + job.opaque = opaque; assert(ctx != NULL); if (ctx->shutdown) return; @@ -330,6 +341,11 @@ void POOL_free(POOL_ctx* ctx) { (void)ctx; } +void POOL_joinJobs(POOL_ctx* ctx){ + assert(!ctx || ctx == &g_poolCtx); + (void)ctx; +} + int POOL_resize(POOL_ctx* ctx, size_t numThreads) { (void)ctx; (void)numThreads; return 0; diff --git a/lib/common/pool.h b/lib/common/pool.h index 0ebde1805db..eb22ff509f5 100644 --- a/lib/common/pool.h +++ b/lib/common/pool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -38,6 +38,12 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, */ void POOL_free(POOL_ctx* ctx); + +/*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. + */ +void POOL_joinJobs(POOL_ctx* ctx); + /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, diff --git a/lib/common/portability_macros.h b/lib/common/portability_macros.h index 2143817f574..8fd6ea82d19 100644 --- a/lib/common/portability_macros.h +++ b/lib/common/portability_macros.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,7 @@ #define ZSTD_PORTABILITY_MACROS_H /** - * This header file contains macro defintions to support portability. + * This header file contains macro definitions to support portability. * This header is shared between C and ASM code, so it MUST only * contain macro definitions. It MUST not contain any C code. * @@ -88,7 +88,7 @@ #endif /** - * Only enable assembly for GNUC comptabile compilers, + * Only enable assembly for GNUC compatible compilers, * because other platforms may not support GAS assembly syntax. * * Only enable assembly for Linux / MacOS, other platforms may @@ -134,4 +134,23 @@ # define ZSTD_ENABLE_ASM_X86_64_BMI2 0 #endif +/* + * For x86 ELF targets, add .note.gnu.property section for Intel CET in + * assembly sources when CET is enabled. + * + * Additionally, any function that may be called indirectly must begin + * with ZSTD_CET_ENDBRANCH. 
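The new POOL_joinJobs() added above lets callers drain the queue without destroying the pool. A hedged usage sketch, assuming the existing POOL_create()/POOL_add() entry points of pool.h; someJob and someJobArg are placeholders:

    POOL_ctx* const pool = POOL_create(4 /* numThreads */, 16 /* queueSize */);
    if (pool != NULL) {
        POOL_add(pool, someJob, someJobArg);  /* queue work as usual */
        POOL_joinJobs(pool);                  /* blocks until the queue is empty and workers are idle */
        POOL_free(pool);                      /* still required to release the pool itself */
    }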
+ */ +#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \ + && defined(__has_include) +# if __has_include() +# include +# define ZSTD_CET_ENDBRANCH _CET_ENDBR +# endif +#endif + +#ifndef ZSTD_CET_ENDBRANCH +# define ZSTD_CET_ENDBRANCH +#endif + #endif /* ZSTD_PORTABILITY_MACROS_H */ diff --git a/lib/common/threading.c b/lib/common/threading.c index 92cf57c195a..f2341105a1c 100644 --- a/lib/common/threading.c +++ b/lib/common/threading.c @@ -23,8 +23,7 @@ int g_ZSTD_threading_useless_symbol; #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + * Windows minimalist Pthread Wrapper */ @@ -35,37 +34,92 @@ int g_ZSTD_threading_useless_symbol; /* === Implementation === */ +typedef struct { + void* (*start_routine)(void*); + void* arg; + int initialized; + ZSTD_pthread_cond_t initialized_cond; + ZSTD_pthread_mutex_t initialized_mutex; +} ZSTD_thread_params_t; + static unsigned __stdcall worker(void *arg) { - ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; - thread->arg = thread->start_routine(thread->arg); + void* (*start_routine)(void*); + void* thread_arg; + + /* Inialized thread_arg and start_routine and signal main thread that we don't need it + * to wait any longer. + */ + { + ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg; + thread_arg = thread_param->arg; + start_routine = thread_param->start_routine; + + /* Signal main thread that we are running and do not depend on its memory anymore */ + ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex); + thread_param->initialized = 1; + ZSTD_pthread_cond_signal(&thread_param->initialized_cond); + ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex); + } + + start_routine(thread_arg); + return 0; } int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg) { + ZSTD_thread_params_t thread_param; (void)unused; - thread->arg = arg; - thread->start_routine = start_routine; - thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); - if (!thread->handle) + thread_param.start_routine = start_routine; + thread_param.arg = arg; + thread_param.initialized = 0; + *thread = NULL; + + /* Setup thread initialization synchronization */ + if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) { + /* Should never happen on Windows */ + return -1; + } + if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) { + /* Should never happen on Windows */ + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + return -1; + } + + /* Spawn thread */ + *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL); + if (!thread) { + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); return errno; - else - return 0; + } + + /* Wait for thread to be initialized */ + ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex); + while(!thread_param.initialized) { + ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex); + } + ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex); + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + + return 0; } -int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +int ZSTD_pthread_join(ZSTD_pthread_t thread) { DWORD result; - if (!thread.handle) return 0; + if (!thread) return 0; + + result = 
WaitForSingleObject(thread, INFINITE); + CloseHandle(thread); - result = WaitForSingleObject(thread.handle, INFINITE); switch (result) { case WAIT_OBJECT_0: - if (value_ptr) *value_ptr = thread.arg; return 0; case WAIT_ABANDONED: return EINVAL; diff --git a/lib/common/threading.h b/lib/common/threading.h index fd0060d5aa2..fb5c1c87873 100644 --- a/lib/common/threading.h +++ b/lib/common/threading.h @@ -23,8 +23,7 @@ extern "C" { #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + * Windows minimalist Pthread Wrapper */ #ifdef WINVER # undef WINVER @@ -62,16 +61,12 @@ extern "C" { #define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) /* ZSTD_pthread_create() and ZSTD_pthread_join() */ -typedef struct { - HANDLE handle; - void* (*start_routine)(void*); - void* arg; -} ZSTD_pthread_t; +typedef HANDLE ZSTD_pthread_t; int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg); -int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); +int ZSTD_pthread_join(ZSTD_pthread_t thread); /** * add here more wrappers as required @@ -99,7 +94,7 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) +#define ZSTD_pthread_join(a) pthread_join((a),NULL) #else /* DEBUGLEVEL >= 1 */ @@ -124,7 +119,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) +#define ZSTD_pthread_join(a) pthread_join((a),NULL) #endif diff --git a/lib/common/xxhash.c b/lib/common/xxhash.c index d49497cf1cf..fd237c9062a 100644 --- a/lib/common/xxhash.c +++ b/lib/common/xxhash.c @@ -1,9 +1,9 @@ /* * xxHash - Fast Hash algorithm - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : - * - xxHash homepage: http://www.xxhash.com + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/common/xxhash.h b/lib/common/xxhash.h index 8ebbfdd6261..b8b73290bbc 100644 --- a/lib/common/xxhash.h +++ b/lib/common/xxhash.h @@ -1,9 +1,9 @@ /* * xxHash - Fast Hash algorithm - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : - * - xxHash homepage: http://www.xxhash.com + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the @@ -1314,7 +1314,7 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, * care, as what works on one compiler/platform/optimization level may cause * another to read garbage data or even crash. * - * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. 
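With ZSTD_pthread_t now a bare HANDLE on Windows and ZSTD_pthread_join() dropping its value_ptr argument (mapped to pthread_join(t, NULL) on POSIX), a worker can no longer hand its result back through join. A minimal sketch of the adjusted call pattern; workerFn and jobState are illustrative names:

    static void* workerFn(void* opaque)
    {
        /* write any results into *opaque instead of returning them */
        (void)opaque;
        return NULL;
    }

    ZSTD_pthread_t t;
    if (ZSTD_pthread_create(&t, NULL, workerFn, &jobState) == 0) {
        ZSTD_pthread_join(t);   /* previously: ZSTD_pthread_join(t, NULL) */
    }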
* * Prefer these methods in priority order (0 > 3 > 1 > 2) */ @@ -1534,7 +1534,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_ * @brief Used to prevent unwanted optimizations for @p var. * * It uses an empty GCC inline assembly statement with a register constraint - * which forces @p var into a general purpose register (eg eax, ebx, ecx + * which forces @p var into a general purpose register (e.g. eax, ebx, ecx * on x86) and marks it as modified. * * This is used in a few places to avoid unwanted autovectorization (e.g. @@ -1655,7 +1655,7 @@ static xxh_u32 XXH_read32(const void* ptr) /* * Portable and safe solution. Generally efficient. - * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u32 XXH_read32(const void* memPtr) { @@ -2296,7 +2296,7 @@ static xxh_u64 XXH_read64(const void* ptr) /* * Portable and safe solution. Generally efficient. - * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u64 XXH_read64(const void* memPtr) { @@ -2809,7 +2809,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ { * @ingroup tuning * @brief Selects the minimum alignment for XXH3's accumulators. * - * When using SIMD, this should match the alignment reqired for said vector + * When using SIMD, this should match the alignment required for said vector * type, so, for example, 32 for AVX2. * * Default: Auto detected. @@ -3026,7 +3026,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ { * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions, * you are only using 2/3 of the CPU bandwidth. * - * This is even more noticable on the more advanced cores like the A76 which + * This is even more noticeable on the more advanced cores like the A76 which * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. * * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the diff --git a/lib/common/zstd_common.c b/lib/common/zstd_common.c index 3d7e35b309b..3208552475d 100644 --- a/lib/common/zstd_common.c +++ b/lib/common/zstd_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/common/zstd_deps.h b/lib/common/zstd_deps.h index 14211344a02..4d767ae9b05 100644 --- a/lib/common/zstd_deps.h +++ b/lib/common/zstd_deps.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h index e4d36ce0905..37836dc703f 100644 --- a/lib/common/zstd_internal.h +++ b/lib/common/zstd_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -28,7 +28,6 @@ #include "../zstd.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" -#define HUF_STATIC_LINKING_ONLY #include "huf.h" #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ @@ -93,9 +92,9 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; #define ZSTD_FRAMECHECKSUMSIZE 4 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ +#define MIN_LITERALS_FOR_4_STREAMS 6 -#define HufLog 12 typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; #define LONGNBSEQ 0x7F00 @@ -103,6 +102,7 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy #define MINMATCH 3 #define Litbits 8 +#define LitHufLog 11 #define MaxLit ((1<= length) return; op += 16; @@ -250,7 +246,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e COPY16(op, ip); } while (op < oend); -#endif } } @@ -299,11 +294,11 @@ typedef enum { typedef struct { seqDef* sequencesStart; seqDef* sequences; /* ptr to end of sequences */ - BYTE* litStart; - BYTE* lit; /* ptr to end of literals */ - BYTE* llCode; - BYTE* mlCode; - BYTE* ofCode; + BYTE* litStart; + BYTE* lit; /* ptr to end of literals */ + BYTE* llCode; + BYTE* mlCode; + BYTE* ofCode; size_t maxNbSeq; size_t maxNbLit; @@ -311,8 +306,8 @@ typedef struct { * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment * the existing value of the litLength or matchLength by 0x10000. 
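The hunk that follows corrects the read-back of these extended lengths from `+= 0xFFFF` to `+= 0x10000`, matching the comment above. A worked example with an assumed value: a sequence carrying 70000 literals (0x11170) cannot fit in the 16-bit litLength field, so the field holds 0x1170 (= 4464) and longLengthType/longLengthPos mark that sequence; reading it back, 4464 + 0x10000 = 70000 is correct, whereas the previous 4464 + 0xFFFF = 69999 was off by one.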
*/ - ZSTD_longLengthType_e longLengthType; - U32 longLengthPos; /* Index of the sequence to apply long length modification to */ + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; /* Index of the sequence to apply long length modification to */ } seqStore_t; typedef struct { @@ -331,10 +326,10 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore seqLen.matchLength = seq->mlBase + MINMATCH; if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { if (seqStore->longLengthType == ZSTD_llt_literalLength) { - seqLen.litLength += 0xFFFF; + seqLen.litLength += 0x10000; } if (seqStore->longLengthType == ZSTD_llt_matchLength) { - seqLen.matchLength += 0xFFFF; + seqLen.matchLength += 0x10000; } } return seqLen; @@ -347,12 +342,13 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { + size_t nbBlocks; size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ +int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ /* custom memory allocation functions */ void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem); @@ -360,98 +356,6 @@ void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem); void ZSTD_customFree(void* ptr, ZSTD_customMem customMem); -MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ -# if STATIC_BMI2 == 1 - return _lzcnt_u32(val)^31; -# else - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ - return __builtin_clz (val) ^ 31; -# elif defined(__ICCARM__) /* IAR Intrinsic */ - return 31 - __CLZ(val); -# else /* Software version */ - static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; -# endif - } -} - -/** - * Counts the number of trailing zeros of a `size_t`. - * Most compilers should support CTZ as a builtin. A backup - * implementation is provided if the builtin isn't supported, but - * it may not be terribly efficient. 
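ZSTD_highbit32() and ZSTD_countTrailingZeros() are removed from zstd_internal.h in this hunk; judging by the later fse_compress.c and huf_compress.c hunks that add `#include "../common/bits.h" /* ZSTD_highbit32 */`, the bit-scan helpers appear to be consolidated into that shared header. For reference, a minimal sketch of the contract, mirroring the GCC path of the removed code (index of the highest set bit, i.e. floor of log2):

    /* assumes v != 0, as the removed implementation also asserted */
    static unsigned highbit32_sketch(unsigned v)
    {
        return (unsigned)(__builtin_clz(v) ^ 31);   /* e.g. 1 -> 0, 32 -> 5, 200 -> 7 */
    }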
- */ -MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val) -{ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 - return _tzcnt_u64(val); -# else - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, (U64)val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return __builtin_ctzll((U64)val); -# else - static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19, - 4, 25, 14, 28, 9, 34, 20, 56, - 5, 17, 26, 54, 15, 41, 29, 43, - 10, 31, 38, 35, 21, 45, 49, 57, - 63, 6, 12, 18, 24, 27, 33, 55, - 16, 53, 40, 42, 30, 37, 44, 48, - 62, 11, 23, 32, 52, 39, 36, 47, - 61, 22, 51, 46, 60, 50, 59, 58 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return __builtin_ctz((U32)val); -# else - static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3, - 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, - 26, 12, 18, 6, 11, 5, 10, 9 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } -} - - /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; diff --git a/lib/common/zstd_trace.h b/lib/common/zstd_trace.h index f9121f7d8ed..da20534ebd8 100644 --- a/lib/common/zstd_trace.h +++ b/lib/common/zstd_trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,13 +21,13 @@ extern "C" { * For now, enable conservatively: * - Only GNUC * - Only ELF - * - Only x86-64 and i386 + * - Only x86-64, i386 and aarch64 * Also, explicitly disable on platforms known not to work so they aren't * forgotten in the future. */ #if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \ defined(__GNUC__) && defined(__ELF__) && \ - (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)) && \ + (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \ !defined(__CYGWIN__) && !defined(_AIX) # define ZSTD_HAVE_WEAK_SYMBOLS 1 diff --git a/lib/compress/clevels.h b/lib/compress/clevels.h index 7ed2e00490b..c18da465f32 100644 --- a/lib/compress/clevels.h +++ b/lib/compress/clevels.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/fse_compress.c b/lib/compress/fse_compress.c index 5547b4ac099..5d3770808dd 100644 --- a/lib/compress/fse_compress.c +++ b/lib/compress/fse_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * FSE : Finite State Entropy encoder - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
* * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -26,6 +26,7 @@ #define ZSTD_DEPS_NEED_MALLOC #define ZSTD_DEPS_NEED_MATH64 #include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */ +#include "../common/bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -90,7 +91,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, assert(tableLog < 16); /* required for threshold strategy to work */ /* For explanations on how to distribute symbol values over the table : - * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ #ifdef __clang_analyzer__ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */ @@ -191,7 +192,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, break; default : assert(normalizedCounter[s] > 1); - { U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1); + { U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1); U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut; symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]); @@ -342,21 +343,11 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, * FSE Compression Code ****************************************************************/ -FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) -{ - size_t size; - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); - return (FSE_CTable*)ZSTD_malloc(size); -} - -void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); } - /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { - U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1; - U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1; + U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2; U32 minBits = minBitsSrc < minBitsSymbols ? 
minBitsSrc : minBitsSymbols; assert(srcSize > 1); /* Not supported, RLE should be used instead */ return minBits; @@ -364,7 +355,7 @@ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) { - U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus; U32 tableLog = maxTableLog; U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); assert(srcSize > 1); /* Not supported, RLE should be used instead */ @@ -532,40 +523,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, return tableLog; } - -/* fake FSE_CTable, for raw (uncompressed) input */ -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits) -{ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - void* const ptr = ct; - U16* const tableU16 = ( (U16*) ptr) + 2; - void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */ - FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* header */ - tableU16[-2] = (U16) nbBits; - tableU16[-1] = (U16) maxSymbolValue; - - /* Build table */ - for (s=0; s not compressible */ - if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ - } - - tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); - CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) ); - - /* Write table description header */ - { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); - op += nc_err; - } - - /* Compress */ - CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); - if (cSize == 0) return 0; /* not enough space for compressed data */ - op += cSize; - } - - /* check compressibility */ - if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; - - return op-ostart; -} - -typedef struct { - FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - union { - U32 hist_wksp[HIST_WKSP_SIZE_U32]; - BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; - } workspace; -} fseWkspMax_t; - -size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) -{ - fseWkspMax_t scratchBuffer; - DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ - if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); - return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); -} - -size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); -} -#endif - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/compress/hist.c b/lib/compress/hist.c index 073c57e7527..e2fb431f03a 100644 --- a/lib/compress/hist.c +++ b/lib/compress/hist.c @@ -1,7 +1,7 @@ /* ****************************************************************** * 
hist : Histogram functions * part of Finite State Entropy project - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/lib/compress/hist.h b/lib/compress/hist.h index 228ed48a71d..887896b813b 100644 --- a/lib/compress/hist.h +++ b/lib/compress/hist.h @@ -1,7 +1,7 @@ /* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/lib/compress/huf_compress.c b/lib/compress/huf_compress.c index 2b3d6adc2a2..29871877a7f 100644 --- a/lib/compress/huf_compress.c +++ b/lib/compress/huf_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * Huffman encoder, part of New Generation Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -29,9 +29,9 @@ #include "hist.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "../common/fse.h" /* header compression */ -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -42,13 +42,67 @@ /* ************************************************************** -* Utils +* Required declarations ****************************************************************/ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + + +/* ************************************************************** +* Debug Traces +****************************************************************/ + +#if DEBUGLEVEL >= 2 + +static size_t showU32(const U32* arr, size_t size) { - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); + size_t u; + for (u=0; u= sizeof(HUF_WriteCTableWksp)); + /* check conditions */ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); @@ -207,16 +266,6 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, return ((maxSymbolValue+1)/2) + 1; } -/*! HUF_writeCTable() : - `CTable` : Huffman tree to save, using huf representation. 
- @return : size of saved CTable */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, - const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) -{ - HUF_WriteCTableWksp wksp; - return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp)); -} - size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) { @@ -272,68 +321,64 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue) { - const HUF_CElt* ct = CTable + 1; + const HUF_CElt* const ct = CTable + 1; assert(symbolValue <= HUF_SYMBOLVALUE_MAX); return (U32)HUF_getNbBits(ct[symbolValue]); } -typedef struct nodeElt_s { - U32 count; - U16 parent; - BYTE byte; - BYTE nbBits; -} nodeElt; - /** * HUF_setMaxHeight(): - * Enforces maxNbBits on the Huffman tree described in huffNode. + * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. * - * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts - * the tree to so that it is a valid canonical Huffman tree. + * It attempts to convert all nodes with nbBits > @targetNbBits + * to employ @targetNbBits instead. Then it adjusts the tree + * so that it remains a valid canonical Huffman tree. * * @pre The sum of the ranks of each symbol == 2^largestBits, * where largestBits == huffNode[lastNonNull].nbBits. * @post The sum of the ranks of each symbol == 2^largestBits, - * where largestBits is the return value <= maxNbBits. + * where largestBits is the return value (expected <= targetNbBits). * - * @param huffNode The Huffman tree modified in place to enforce maxNbBits. + * @param huffNode The Huffman tree modified in place to enforce targetNbBits. + * It's presumed sorted, from most frequent to rarest symbol. * @param lastNonNull The symbol with the lowest count in the Huffman tree. - * @param maxNbBits The maximum allowed number of bits, which the Huffman tree + * @param targetNbBits The allowed number of bits, which the Huffman tree * may not respect. After this function the Huffman tree will - * respect maxNbBits. - * @return The maximum number of bits of the Huffman tree after adjustment, - * necessarily no more than maxNbBits. + * respect targetNbBits. + * @return The maximum number of bits of the Huffman tree after adjustment. */ -static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits) { const U32 largestBits = huffNode[lastNonNull].nbBits; - /* early exit : no elt > maxNbBits, so the tree is already valid. */ - if (largestBits <= maxNbBits) return largestBits; + /* early exit : no elt > targetNbBits, so the tree is already valid. */ + if (largestBits <= targetNbBits) return largestBits; + + DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits); /* there are several too large elements (at least >= 2) */ { int totalCost = 0; - const U32 baseCost = 1 << (largestBits - maxNbBits); + const U32 baseCost = 1 << (largestBits - targetNbBits); int n = (int)lastNonNull; - /* Adjust any ranks > maxNbBits to maxNbBits. + /* Adjust any ranks > targetNbBits to targetNbBits. * Compute totalCost, which is how far the sum of the ranks is * we are over 2^largestBits after adjust the offending ranks. 
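Since the HUF_writeCTable() convenience wrapper is removed earlier in this hunk, callers now supply the scratch space themselves; the replacement is what the deleted body did. A sketch, where dst, maxDstSize, CTable, maxSymbolValue and huffLog are the caller's existing values:

    HUF_WriteCTableWksp wksp;
    size_t const hSize = HUF_writeCTable_wksp(dst, maxDstSize, CTable,
                                              maxSymbolValue, huffLog,
                                              &wksp, sizeof(wksp));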
*/ - while (huffNode[n].nbBits > maxNbBits) { + while (huffNode[n].nbBits > targetNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); - huffNode[n].nbBits = (BYTE)maxNbBits; + huffNode[n].nbBits = (BYTE)targetNbBits; n--; } - /* n stops at huffNode[n].nbBits <= maxNbBits */ - assert(huffNode[n].nbBits <= maxNbBits); - /* n end at index of smallest symbol using < maxNbBits */ - while (huffNode[n].nbBits == maxNbBits) --n; + /* n stops at huffNode[n].nbBits <= targetNbBits */ + assert(huffNode[n].nbBits <= targetNbBits); + /* n end at index of smallest symbol using < targetNbBits */ + while (huffNode[n].nbBits == targetNbBits) --n; - /* renorm totalCost from 2^largestBits to 2^maxNbBits + /* renorm totalCost from 2^largestBits to 2^targetNbBits * note : totalCost is necessarily a multiple of baseCost */ - assert((totalCost & (baseCost - 1)) == 0); - totalCost >>= (largestBits - maxNbBits); + assert(((U32)totalCost & (baseCost - 1)) == 0); + totalCost >>= (largestBits - targetNbBits); assert(totalCost > 0); /* repay normalized cost */ @@ -342,19 +387,19 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) /* Get pos of last (smallest = lowest cum. count) symbol per rank */ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); - { U32 currentNbBits = maxNbBits; + { U32 currentNbBits = targetNbBits; int pos; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; - currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ - rankLast[maxNbBits-currentNbBits] = (U32)pos; + currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */ + rankLast[targetNbBits-currentNbBits] = (U32)pos; } } while (totalCost > 0) { /* Try to reduce the next power of 2 above totalCost because we * gain back half the rank. */ - U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1; + U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 const highPos = rankLast[nBitsToDecrease]; U32 const lowPos = rankLast[nBitsToDecrease-1]; @@ -394,7 +439,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; - if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ } } /* while (totalCost > 0) */ @@ -406,11 +451,11 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) * TODO. */ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ - /* special case : no rank 1 symbol (using maxNbBits-1); - * let's create one from largest rank 0 (using maxNbBits). + /* special case : no rank 1 symbol (using targetNbBits-1); + * let's create one from largest rank 0 (using targetNbBits). 
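The @pre/@post documented above state that the adjusted code lengths must keep satisfying the Kraft equality, i.e. the ranks still sum to 2^largestBits. A small self-check sketch of that invariant, not part of the patch itself:

    /* Verifies sum(2^(largestBits - nbBits)) == 2^largestBits over the
     * sorted huffNode[0..lastNonNull] array used by HUF_setMaxHeight(). */
    static int HUF_kraftSumIsValid(const nodeElt* huffNode, int lastNonNull)
    {
        U32 const largestBits = huffNode[lastNonNull].nbBits;
        U64 sum = 0;
        int n;
        for (n = 0; n <= lastNonNull; n++)
            sum += (U64)1 << (largestBits - huffNode[n].nbBits);
        return sum == ((U64)1 << largestBits);
    }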
*/ if (rankLast[1] == noSymbol) { - while (huffNode[n].nbBits == maxNbBits) n--; + while (huffNode[n].nbBits == targetNbBits) n--; huffNode[n+1].nbBits--; assert(n >= 0); rankLast[1] = (U32)(n+1); @@ -424,7 +469,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) } /* repay normalized cost */ } /* there are several too large elements (at least >= 2) */ - return maxNbBits; + return targetNbBits; } typedef struct { @@ -432,7 +477,7 @@ typedef struct { U16 curr; } rankPos; -typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; +typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; /* Number of buckets available for HUF_sort() */ #define RANK_POSITION_TABLE_SIZE 192 @@ -451,8 +496,8 @@ typedef struct { * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. */ #define RANK_POSITION_MAX_COUNT_LOG 32 -#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */ -#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */ +#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) +#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) /* Return the appropriate bucket index for a given count. See definition of * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. @@ -460,7 +505,7 @@ typedef struct { static U32 HUF_getIndex(U32 const count) { return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) ? count - : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; + : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; } /* Helper swap function for HUF_quickSortPartition() */ @@ -583,7 +628,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy /* Sort each bucket. */ for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { - U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base; + int const bucketSize = rankPosition[n].curr - rankPosition[n].base; U32 const bucketStartIdx = rankPosition[n].base; if (bucketSize > 1) { assert(bucketStartIdx < maxSymbolValue1); @@ -594,6 +639,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy assert(HUF_isSorted(huffNode, maxSymbolValue1)); } + /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). 
@@ -614,6 +660,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) int lowS, lowN; int nodeNb = STARTNODE; int n, nodeRoot; + DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1); /* init for parents */ nonNullRank = (int)maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; @@ -640,6 +687,8 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1)); + return nonNullRank; } @@ -677,28 +726,36 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i CTable[0] = maxNbBits; } -size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +size_t +HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, + void* workSpace, size_t wkspSize) { - HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); + HUF_buildCTable_wksp_tables* const wksp_tables = + (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; + HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables)); + + DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1); + /* safety checks */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) - return ERROR(workSpace_tooSmall); + return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) - return ERROR(maxSymbolValue_tooLarge); + return ERROR(maxSymbolValue_tooLarge); ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); /* sort, decreasing order */ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); + DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1)); /* build tree */ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); - /* enforce maxTableLog */ + /* determine and enforce maxTableLog */ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ @@ -807,7 +864,7 @@ FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int id #if DEBUGLEVEL >= 1 { size_t const nbBits = HUF_getNbBits(elt); - size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1; + size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1; (void)dirtyBits; /* Middle bits are 0. 
*/ assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); @@ -887,7 +944,7 @@ static size_t HUF_closeCStream(HUF_CStream_t* bitC) { size_t const nbBits = bitC->bitPos[0] & 0xFF; if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ - return (bitC->ptr - bitC->startPtr) + (nbBits > 0); + return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0); } } @@ -1048,9 +1105,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); @@ -1061,28 +1118,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - (void)bmi2; + (void)flags; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); -} - -size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) -{ - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, int bmi2) + const HUF_CElt* CTable, int flags) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; @@ -1096,7 +1148,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, op += 6; /* jumpTable */ assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; @@ -1104,7 +1156,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; @@ -1112,7 +1164,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, 
(U16)cSize); op += cSize; @@ -1121,7 +1173,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); assert(ip <= iend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; op += cSize; } @@ -1129,14 +1181,9 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, return (size_t)(op-ostart); } -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) -{ - return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); -} - -size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; @@ -1144,11 +1191,11 @@ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, - HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) + HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) { size_t const cSize = (nbStreams==HUF_singleStream) ? - HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : - HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); + HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : + HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; @@ -1171,6 +1218,79 @@ typedef struct { #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) +{ + unsigned cardinality = 0; + unsigned i; + + for (i = 0; i < maxSymbolValue + 1; i++) { + if (count[i] != 0) cardinality += 1; + } + + return cardinality; +} + +unsigned HUF_minTableLog(unsigned symbolCardinality) +{ + U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; + return minBitsSymbols; +} + +unsigned HUF_optimalTableLog( + unsigned maxTableLog, + size_t srcSize, + unsigned maxSymbolValue, + void* workSpace, size_t wkspSize, + HUF_CElt* table, + const unsigned* count, + int flags) +{ + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); + + if (!(flags & HUF_flags_optimalDepth)) { + /* cheap evaluation, based on FSE */ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); + } + + { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); + size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); + size_t maxBits, hSize, newSize; + const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); + const unsigned minTableLog = HUF_minTableLog(symbolCardinality); + size_t 
optSize = ((size_t) ~0) - 1; + unsigned optLog = maxTableLog, optLogGuess; + + DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize); + + /* Search until size increases */ + for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { + DEBUGLOG(7, "checking for huffLog=%u", optLogGuess); + maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); + if (ERR_isError(maxBits)) continue; + + if (maxBits < optLogGuess && optLogGuess > minTableLog) break; + + hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); + + if (ERR_isError(hSize)) continue; + + newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; + + if (newSize > optSize + 1) { + break; + } + + if (newSize < optSize) { + optSize = newSize; + optLog = optLogGuess; + } + } + assert(optLog <= HUF_TABLELOG_MAX); + return optLog; + } +} + /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ @@ -1180,14 +1300,14 @@ HUF_compress_internal (void* dst, size_t dstSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace, size_t wkspSize, - HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2, unsigned suspectUncompressible) + HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; + DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize); HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); /* checks & inits */ @@ -1201,16 +1321,17 @@ HUF_compress_internal (void* dst, size_t dstSize, if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ - if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* If uncompressible data is suspected, do a smaller sampling first */ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); - if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { + if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { size_t largestTotal = 0; + DEBUGLOG(5, "input suspected incompressible : sampling to check"); { unsigned maxSymbolValueBegin = maxSymbolValue; CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); largestTotal += largestBegin; @@ -1227,6 +1348,7 @@ HUF_compress_internal (void* dst, size_t dstSize, if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } + DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1)); /* Check validity of previous table */ if ( repeat @@ -1235,19 +1357,20 @@ HUF_compress_internal (void* dst, size_t dstSize, *repeat = HUF_repeat_none; } /* Heuristic : 
use existing table for small inputs */ - if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* Build Huffman Tree */ - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; + DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1)); } /* Zero unused symbols in CTable, so we can check it for validity */ { @@ -1266,7 +1389,7 @@ HUF_compress_internal (void* dst, size_t dstSize, if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } } /* Use the new huffman table */ @@ -1278,46 +1401,20 @@ HUF_compress_internal (void* dst, size_t dstSize, } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, table->CTable, bmi2); -} - - -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/, 0); + nbStreams, table->CTable, flags); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, - int bmi2, unsigned suspectUncompressible) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { + DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, - repeat, preferRepeat, bmi2, suspectUncompressible); -} - -/* HUF_compress4X_repeat(): - * compress input using 4 streams. 
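Note on the API change in the hunks above: the separate bmi2 / preferRepeat / suspectUncompressible arguments of the HUF_compress*_usingCTable and HUF_compress*_repeat entry points are folded into a single flags word that the implementation tests bit by bit (flags & HUF_flags_bmi2, flags & HUF_flags_preferRepeat, flags & HUF_flags_suspectUncompressible, flags & HUF_flags_optimalDepth). A minimal sketch of composing such a word follows; the EX_-prefixed names and bit values are illustrative assumptions, since the real HUF_flags_e definition lives in huf.h and is not shown in this hunk.

/* Sketch only: bit values are assumptions, not the library's HUF_flags_e. */
typedef enum {
    EX_HUF_flags_bmi2                  = 1 << 0,  /* take the BMI2-specialized encoder */
    EX_HUF_flags_preferRepeat          = 1 << 1,  /* reuse the previous CTable when valid */
    EX_HUF_flags_suspectUncompressible = 1 << 2,  /* sample the input before a full histogram */
    EX_HUF_flags_optimalDepth          = 1 << 3   /* probe several table depths (HUF_optimalTableLog) */
} EX_HUF_flags_e;

static int EX_HUF_makeFlags(int hasBmi2, int reusePreviousTable)
{
    int flags = 0;
    if (hasBmi2)            flags |= EX_HUF_flags_bmi2;
    if (reusePreviousTable) flags |= EX_HUF_flags_preferRepeat;
    return flags;
}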
- * provide workspace to generate compression tables */ -size_t HUF_compress4X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/, 0); + repeat, flags); } /* HUF_compress4X_repeat(): @@ -1328,43 +1425,11 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { + DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); + hufTable, repeat, flags); } - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -/** HUF_buildCTable() : - * @return : maxNbBits - * Note : count is used before tree is written, so they can safely overlap - */ -size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) -{ - HUF_buildCTable_wksp_tables workspace; - return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace)); -} - -size_t HUF_compress1X (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - U64 workSpace[HUF_WORKSPACE_SIZE_U64]; - return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress2 (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - U64 workSpace[HUF_WORKSPACE_SIZE_U64]; - return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); -} -#endif diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c index f06456af926..b55f684cd71 100644 --- a/lib/compress/zstd_compress.c +++ b/lib/compress/zstd_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,6 @@ #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_compress_internal.h" #include "zstd_compress_sequences.h" @@ -27,6 +26,7 @@ #include "zstd_opt.h" #include "zstd_ldm.h" #include "zstd_compress_superblock.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /* *************************************************************** * Tuning parameters @@ -58,14 +58,17 @@ * Helper functions ***************************************/ /* ZSTD_compressBound() - * Note that the result from this function is only compatible with the "normal" - * full-block strategy. 
- * When there are a lot of small blocks due to frequent flush in streaming mode - * the overhead of headers can make the compressed data to be larger than the - * return value of ZSTD_compressBound(). + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound(). */ size_t ZSTD_compressBound(size_t srcSize) { - return ZSTD_COMPRESSBOUND(srcSize); + size_t const r = ZSTD_COMPRESSBOUND(srcSize); + if (r==0) return ERROR(srcSize_wrong); + return r; } @@ -177,12 +180,9 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) if (cctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "not compatible with static CCtx"); - { - int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); + { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); ZSTD_freeCCtxContent(cctx); - if (!cctxInWorkspace) { - ZSTD_customFree(cctx, cctx->customMem); - } + if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem); } return 0; } @@ -267,9 +267,9 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder)); } -/* Returns 1 if compression parameters are such that we should +/* Returns ZSTD_ps_enable if compression parameters are such that we should * enable long distance matching (wlog >= 27, strategy >= btopt). - * Returns 0 otherwise. + * Returns ZSTD_ps_disable otherwise. */ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, const ZSTD_compressionParameters* const cParams) { @@ -277,6 +277,34 @@ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; } +static int ZSTD_resolveExternalSequenceValidation(int mode) { + return mode; +} + +/* Resolves maxBlockSize to the default if no value is present. */ +static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) { + if (maxBlockSize == 0) { + return ZSTD_BLOCKSIZE_MAX; + } else { + return maxBlockSize; + } +} + +static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) { + if (value != ZSTD_ps_auto) return value; + if (cLevel < 10) { + return ZSTD_ps_disable; + } else { + return ZSTD_ps_enable; + } +} + +/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. + * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. 
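The reworked ZSTD_compressBound() above no longer forwards ZSTD_COMPRESSBOUND() unconditionally: when the macro evaluates to 0 (srcSize too large to bound safely) it now returns ERROR(srcSize_wrong), so callers can detect the condition with ZSTD_isError(). A usage sketch under that assumption, with allocation error handling abbreviated:

#include <stdlib.h>
#include <zstd.h>

/* Size a destination buffer from ZSTD_compressBound(); NULL means srcSize is
 * too large for a single-call compression (or the allocation failed). */
static void* EX_allocCompressedBuffer(size_t srcSize, size_t* dstCapacity)
{
    size_t const bound = ZSTD_compressBound(srcSize);
    if (ZSTD_isError(bound)) { *dstCapacity = 0; return NULL; }
    *dstCapacity = bound;
    return malloc(bound);
}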
*/ +static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) { + return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast; +} + static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { @@ -294,6 +322,10 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( } cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, + cctxParams.compressionLevel); assert(!ZSTD_checkCParams(cParams)); return cctxParams; } @@ -339,10 +371,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) #define ZSTD_NO_CLEVEL 0 /** - * Initializes the cctxParams from params and compressionLevel. + * Initializes `cctxParams` from `params` and `compressionLevel`. * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. */ -static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel) +static void +ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, + const ZSTD_parameters* params, + int compressionLevel) { assert(!ZSTD_checkCParams(params->cParams)); ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); @@ -355,6 +390,9 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams); cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); } @@ -369,7 +407,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete /** * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. - * @param param Validated zstd parameters. + * @param params Validated zstd parameters. 
*/ static void ZSTD_CCtxParams_setZstdParams( ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) @@ -478,8 +516,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_enableLongDistanceMatching: - bounds.lowerBound = 0; - bounds.upperBound = 1; + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_ldmHashLog: @@ -572,6 +610,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = 1; return bounds; + case ZSTD_c_prefetchCDictTables: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + + case ZSTD_c_enableSeqProducerFallback: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_maxBlockSize: + bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + + case ZSTD_c_searchForExternalRepcodes: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + default: bounds.error = ERROR(parameter_unsupported); return bounds; @@ -636,6 +694,10 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_searchForExternalRepcodes: default: return 0; } @@ -648,7 +710,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { - RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); + RETURN_ERROR(stage_wrong, "can only set params in cctx init stage"); } } switch(param) @@ -691,6 +753,10 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_searchForExternalRepcodes: break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); @@ -746,12 +812,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, case ZSTD_c_minMatch : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_minMatch, value); - CCtxParams->cParams.minMatch = value; + CCtxParams->cParams.minMatch = (U32)value; return CCtxParams->cParams.minMatch; case ZSTD_c_targetLength : BOUNDCHECK(ZSTD_c_targetLength, value); - CCtxParams->cParams.targetLength = value; + CCtxParams->cParams.targetLength = (U32)value; return CCtxParams->cParams.targetLength; case ZSTD_c_strategy : @@ -764,12 +830,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, /* Content size written in frame header _when known_ (default:1) */ DEBUGLOG(4, "set content size flag = %u", (value!=0)); CCtxParams->fParams.contentSizeFlag = value != 0; - return CCtxParams->fParams.contentSizeFlag; + return (size_t)CCtxParams->fParams.contentSizeFlag; case ZSTD_c_checksumFlag : /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ CCtxParams->fParams.checksumFlag = value != 0; - return CCtxParams->fParams.checksumFlag; + return (size_t)CCtxParams->fParams.checksumFlag; case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); @@ -778,18 +844,18 @@ 
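The new parameter cases introduced above (ZSTD_c_prefetchCDictTables, ZSTD_c_enableSeqProducerFallback, ZSTD_c_maxBlockSize, ZSTD_c_searchForExternalRepcodes) are driven through the usual ZSTD_CCtx_setParameter() path. A sketch of setting them, assuming a build whose matching zstd.h exposes these experimental identifiers (ZSTD_STATIC_LINKING_ONLY) and with error handling abbreviated:

#define ZSTD_STATIC_LINKING_ONLY   /* assumption: experimental parameters are visible */
#include <zstd.h>

static size_t EX_configureCCtx(ZSTD_CCtx* cctx)
{
    size_t err;
    /* 64 KB sits inside the [ZSTD_BLOCKSIZE_MAX_MIN, ZSTD_BLOCKSIZE_MAX] bounds above */
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 64 * 1024);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_prefetchCDictTables, ZSTD_ps_enable);
    if (ZSTD_isError(err)) return err;
    /* leave repcode search on automatic (resolved from the compression level) */
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchForExternalRepcodes, ZSTD_ps_auto);
}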
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, case ZSTD_c_forceMaxWindow : CCtxParams->forceWindow = (value != 0); - return CCtxParams->forceWindow; + return (size_t)CCtxParams->forceWindow; case ZSTD_c_forceAttachDict : { const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - BOUNDCHECK(ZSTD_c_forceAttachDict, pref); + BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref); CCtxParams->attachDictPref = pref; return CCtxParams->attachDictPref; } case ZSTD_c_literalCompressionMode : { const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; - BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); + BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; } @@ -840,47 +906,48 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, case ZSTD_c_enableDedicatedDictSearch : CCtxParams->enableDedicatedDictSearch = (value!=0); - return CCtxParams->enableDedicatedDictSearch; + return (size_t)CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : + BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : if (value!=0) /* 0 ==> auto */ BOUNDCHECK(ZSTD_c_ldmHashLog, value); - CCtxParams->ldmParams.hashLog = value; + CCtxParams->ldmParams.hashLog = (U32)value; return CCtxParams->ldmParams.hashLog; case ZSTD_c_ldmMinMatch : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmMinMatch, value); - CCtxParams->ldmParams.minMatchLength = value; + CCtxParams->ldmParams.minMatchLength = (U32)value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_c_ldmBucketSizeLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); - CCtxParams->ldmParams.bucketSizeLog = value; + CCtxParams->ldmParams.bucketSizeLog = (U32)value; return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); - CCtxParams->ldmParams.hashRateLog = value; + CCtxParams->ldmParams.hashRateLog = (U32)value; return CCtxParams->ldmParams.hashRateLog; case ZSTD_c_targetCBlockSize : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_targetCBlockSize, value); - CCtxParams->targetCBlockSize = value; + CCtxParams->targetCBlockSize = (U32)value; return CCtxParams->targetCBlockSize; case ZSTD_c_srcSizeHint : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_srcSizeHint, value); CCtxParams->srcSizeHint = value; - return CCtxParams->srcSizeHint; + return (size_t)CCtxParams->srcSizeHint; case ZSTD_c_stableInBuffer: BOUNDCHECK(ZSTD_c_stableInBuffer, value); @@ -917,6 +984,27 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, CCtxParams->deterministicRefPrefix = !!value; return CCtxParams->deterministicRefPrefix; + case ZSTD_c_prefetchCDictTables: + BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); + CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; + return CCtxParams->prefetchCDictTables; + + case ZSTD_c_enableSeqProducerFallback: + BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value); + CCtxParams->enableMatchFinderFallback = value; + return CCtxParams->enableMatchFinderFallback; + + case ZSTD_c_maxBlockSize: + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_maxBlockSize, value); + CCtxParams->maxBlockSize = value; + return CCtxParams->maxBlockSize; + + case ZSTD_c_searchForExternalRepcodes: + BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value); + 
CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; + return CCtxParams->searchForExternalRepcodes; + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } @@ -1049,6 +1137,18 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_deterministicRefPrefix: *value = (int)CCtxParams->deterministicRefPrefix; break; + case ZSTD_c_prefetchCDictTables: + *value = (int)CCtxParams->prefetchCDictTables; + break; + case ZSTD_c_enableSeqProducerFallback: + *value = CCtxParams->enableMatchFinderFallback; + break; + case ZSTD_c_maxBlockSize: + *value = (int)CCtxParams->maxBlockSize; + break; + case ZSTD_c_searchForExternalRepcodes: + *value = (int)CCtxParams->searchForExternalRepcodes; + break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; @@ -1075,9 +1175,24 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams( return 0; } +size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams) +{ + DEBUGLOG(4, "ZSTD_CCtx_setCParams"); + assert(cctx != NULL); + if (cctx->streamStage != zcss_init) { + /* All parameters in @cparams are allowed to be updated during MT compression. + * This must be signaled, so that MT compression picks up the changes */ + cctx->cParamsChanged = 1; + } + /* only update if parameters are valid */ + FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); + cctx->requestedParams.cParams = cparams; + return 0; +} + size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { - DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); + DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; @@ -1220,6 +1335,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't reset parameters only when not in init stage."); ZSTD_clearAllDicts(cctx); + ZSTD_memset(&cctx->externalMatchCtx, 0, sizeof(cctx->externalMatchCtx)); return ZSTD_CCtxParams_reset(&cctx->requestedParams); } return 0; @@ -1316,7 +1432,8 @@ static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize, - ZSTD_cParamMode_e mode) + ZSTD_cParamMode_e mode, + ZSTD_paramSwitch_e useRowMatchFinder) { const U64 minSrcSize = 513; /* (1<<9) + 1 */ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); @@ -1350,8 +1467,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, } /* resize windowLog if input is small enough, to use less memory */ - if ( (srcSize < maxWindowResize) - && (dictSize < maxWindowResize) ) { + if ( (srcSize <= maxWindowResize) + && (dictSize <= maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ? 
ZSTD_HASHLOG_MIN : @@ -1369,6 +1486,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ + /* We can't use more than 32 bits of hash in total, so that means that we require: + * (hashLog + 8) <= 32 && (chainLog + 8) <= 32 + */ + if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) { + U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS; + if (cPar.hashLog > maxShortCacheHashLog) { + cPar.hashLog = maxShortCacheHashLog; + } + if (cPar.chainLog > maxShortCacheHashLog) { + cPar.chainLog = maxShortCacheHashLog; + } + } + + + /* At this point, we aren't 100% sure if we are using the row match finder. + * Unless it is explicitly disabled, conservatively assume that it is enabled. + * In this case it will only be disabled for small sources, so shrinking the + * hash log a little bit shouldn't result in any ratio loss. + */ + if (useRowMatchFinder == ZSTD_ps_auto) + useRowMatchFinder = ZSTD_ps_enable; + + /* We can't hash more than 32-bits in total. So that means that we require: + * (hashLog - rowLog + 8) <= 32 + */ + if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) { + /* Switch to 32-entry rows if searchLog is 5 (or more) */ + U32 const rowLog = BOUNDED(4, cPar.searchLog, 6); + U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS; + U32 const maxHashLog = maxRowHashLog + rowLog; + assert(cPar.hashLog >= rowLog); + if (cPar.hashLog > maxHashLog) { + cPar.hashLog = maxHashLog; + } + } + return cPar; } @@ -1379,7 +1532,7 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar, { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown); + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto); } static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); @@ -1410,7 +1563,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ - return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode); + return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder); } static size_t @@ -1455,6 +1608,13 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; } +/* Helper function for calculating memory requirements. + * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ +static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) { + U32 const divider = (minMatch==3 || useSequenceProducer) ? 
3 : 4; + return blockSize / divider; +} + static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, @@ -1462,12 +1622,13 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_paramSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, - const U64 pledgedSrcSize) + const U64 pledgedSrcSize, + int useSequenceProducer, + size_t maxBlockSize) { size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (cParams->minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); @@ -1486,6 +1647,11 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + size_t const externalSeqSpace = useSequenceProducer + ? ZSTD_cwksp_aligned_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) + : 0; + size_t const neededSpace = cctxSpace + entropySpace + @@ -1494,7 +1660,8 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( ldmSeqSpace + matchStateSize + tokenSpace + - bufferSpace; + bufferSpace + + externalSeqSpace; DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); return neededSpace; @@ -1512,7 +1679,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) * be needed. However, we still allocate two 0-sized buffers, which can * take space under ASAN. */ return ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN); + &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize); } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) @@ -1562,7 +1729,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog); size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered) ? 
((size_t)1 << cParams.windowLog) + blockSize : 0; @@ -1573,7 +1740,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, - ZSTD_CONTENTSIZE_UNKNOWN); + ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize); } } @@ -1847,6 +2014,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, assert(params->useRowMatchFinder != ZSTD_ps_auto); assert(params->useBlockSplitter != ZSTD_ps_auto); assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + assert(params->maxBlockSize != 0); if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams); @@ -1855,9 +2023,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, } { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (params->cParams.minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; + size_t const blockSize = MIN(params->maxBlockSize, windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, params->useSequenceProducer); size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; @@ -1874,7 +2041,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, size_t const neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( ¶ms->cParams, ¶ms->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, - buffInSize, buffOutSize, pledgedSrcSize); + buffInSize, buffOutSize, pledgedSrcSize, params->useSequenceProducer, params->maxBlockSize); int resizeWorkspace; FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); @@ -1917,6 +2084,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, /* init params */ zc->blockState.matchState.cParams = params->cParams; + zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; @@ -1986,6 +2154,14 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->ldmState.loadedDictEnd = 0; } + /* reserve space for block-level external sequences */ + if (params->useSequenceProducer) { + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq; + zc->externalMatchCtx.seqBuffer = + (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); + } + DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); @@ -2059,7 +2235,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, } params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, - cdict->dictContentSize, ZSTD_cpm_attachDict); + cdict->dictContentSize, ZSTD_cpm_attachDict, + params.useRowMatchFinder); params.cParams.windowLog = windowLog; params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize, @@ -2098,6 +2275,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, return 0; } +static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t 
tableSize, + ZSTD_compressionParameters const* cParams) { + if (ZSTD_CDictIndicesAreTagged(cParams)){ + /* Remove tags from the CDict table if they are present. + * See docs on "short cache" in zstd_compress_internal.h for context. */ + size_t i; + for (i = 0; i < tableSize; i++) { + U32 const taggedIndex = src[i]; + U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS; + dst[i] = index; + } + } else { + ZSTD_memcpy(dst, src, tableSize * sizeof(U32)); + } +} + static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, @@ -2133,14 +2326,15 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, : 0; size_t const hSize = (size_t)1 << cdict_cParams->hashLog; - ZSTD_memcpy(cctx->blockState.matchState.hashTable, - cdict->matchState.hashTable, - hSize * sizeof(U32)); + ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, + cdict->matchState.hashTable, + hSize, cdict_cParams); + /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */ if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) { - ZSTD_memcpy(cctx->blockState.matchState.chainTable, - cdict->matchState.chainTable, - chainSize * sizeof(U32)); + ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, + cdict->matchState.chainTable, + chainSize, cdict_cParams); } /* copy tag table */ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) { @@ -2226,6 +2420,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; + params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize, /* loadedDictSize */ 0, ZSTDcrp_leaveDirty, zbuff); @@ -2385,7 +2580,7 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par /* See doc/zstd_compression_format.md for detailed format description */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +int ZSTD_seqToCodes(const seqStore_t* seqStorePtr) { const seqDef* const sequences = seqStorePtr->sequencesStart; BYTE* const llCodeTable = seqStorePtr->llCode; @@ -2393,18 +2588,24 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) BYTE* const mlCodeTable = seqStorePtr->mlCode; U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); U32 u; + int longOffsets = 0; assert(nbSeq <= seqStorePtr->maxNbSeq); for (u=0; u= STREAM_ACCUMULATOR_MIN)); + if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN) + longOffsets = 1; } if (seqStorePtr->longLengthType==ZSTD_llt_literalLength) llCodeTable[seqStorePtr->longLengthPos] = MaxLL; if (seqStorePtr->longLengthType==ZSTD_llt_matchLength) mlCodeTable[seqStorePtr->longLengthPos] = MaxML; + return longOffsets; } /* ZSTD_useTargetCBlockSize(): @@ -2438,6 +2639,7 @@ typedef struct { U32 MLtype; size_t size; size_t lastCountSize; /* Accounts for bug in 1.3.4. 
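ZSTD_copyCDictTableIntoCCtx() above relies on the "short cache" layout in which a tagged CDict table entry keeps the match index in its upper bits and a small hash tag in its lowest ZSTD_SHORT_CACHE_TAG_BITS bits, so copying into a CCtx only has to shift the tag away. A sketch of that packing; the 8-bit tag width is inferred from the "(hashLog + 8) <= 32" comment earlier in this diff and should be treated as an assumption:

#include <stdint.h>

#define EX_TAG_BITS 8u   /* assumed value of ZSTD_SHORT_CACHE_TAG_BITS */

static uint32_t EX_packTaggedIndex(uint32_t index, uint32_t tag)
{
    return (index << EX_TAG_BITS) | (tag & ((1u << EX_TAG_BITS) - 1u));
}

static uint32_t EX_untagIndex(uint32_t taggedIndex)
{
    return taggedIndex >> EX_TAG_BITS;   /* same shift the copy loop performs */
}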
More detail in ZSTD_entropyCompressSeqStore_internal() */ + int longOffsets; } ZSTD_symbolEncodingTypeStats_t; /* ZSTD_buildSequencesStatistics(): @@ -2448,11 +2650,13 @@ typedef struct { * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) */ static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, - const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, - BYTE* dst, const BYTE* const dstEnd, - ZSTD_strategy strategy, unsigned* countWorkspace, - void* entropyWorkspace, size_t entropyWkspSize) { +ZSTD_buildSequencesStatistics( + const seqStore_t* seqStorePtr, size_t nbSeq, + const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, + BYTE* dst, const BYTE* const dstEnd, + ZSTD_strategy strategy, unsigned* countWorkspace, + void* entropyWorkspace, size_t entropyWkspSize) +{ BYTE* const ostart = dst; const BYTE* const oend = dstEnd; BYTE* op = ostart; @@ -2466,7 +2670,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, stats.lastCountSize = 0; /* convert length/distances into codes */ - ZSTD_seqToCodes(seqStorePtr); + stats.longOffsets = ZSTD_seqToCodes(seqStorePtr); assert(op <= oend); assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */ /* build CTable for Literal Lengths */ @@ -2571,22 +2775,22 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, */ #define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 MEM_STATIC size_t -ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - void* entropyWorkspace, size_t entropyWkspSize, - const int bmi2) +ZSTD_entropyCompressSeqStore_internal( + const seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + void* entropyWorkspace, size_t entropyWkspSize, + const int bmi2) { - const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; ZSTD_strategy const strategy = cctxParams->cParams.strategy; unsigned* count = (unsigned*)entropyWorkspace; FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; const seqDef* const sequences = seqStorePtr->sequencesStart; - const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; @@ -2594,29 +2798,31 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t lastCountSize; + int longOffsets = 0; entropyWorkspace = count + (MaxSeq + 1); entropyWkspSize -= (MaxSeq + 1) * sizeof(*count); - DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq); + DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity); ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<= HUF_WORKSPACE_SIZE); /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; - size_t const numSequences = seqStorePtr->sequences - 
seqStorePtr->sequencesStart; - size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart; + size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); /* Base suspicion of uncompressibility on ratio of literals to sequences */ unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); size_t const litSize = (size_t)(seqStorePtr->lit - literals); + size_t const cSize = ZSTD_compressLiterals( - &prevEntropy->huf, &nextEntropy->huf, - cctxParams->cParams.strategy, - ZSTD_literalsCompressionIsDisabled(cctxParams), op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, - bmi2, suspectUncompressible); + &prevEntropy->huf, &nextEntropy->huf, + cctxParams->cParams.strategy, + ZSTD_literalsCompressionIsDisabled(cctxParams), + suspectUncompressible, bmi2); FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); assert(cSize <= dstCapacity); op += cSize; @@ -2642,11 +2848,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return (size_t)(op - ostart); } - { - ZSTD_symbolEncodingTypeStats_t stats; - BYTE* seqHead = op++; + { BYTE* const seqHead = op++; /* build stats for sequences */ - stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, + const ZSTD_symbolEncodingTypeStats_t stats = + ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, &prevEntropy->fse, &nextEntropy->fse, op, oend, strategy, count, @@ -2655,6 +2860,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2)); lastCountSize = stats.lastCountSize; op += stats.size; + longOffsets = stats.longOffsets; } { size_t const bitstreamSize = ZSTD_encodeSequences( @@ -2689,14 +2895,15 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, } MEM_STATIC size_t -ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - size_t srcSize, - void* entropyWorkspace, size_t entropyWkspSize, - int bmi2) +ZSTD_entropyCompressSeqStore( + const seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + size_t srcSize, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) { size_t const cSize = ZSTD_entropyCompressSeqStore_internal( seqStorePtr, prevEntropy, nextEntropy, cctxParams, @@ -2706,15 +2913,21 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. 
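The raw-block fallback described in the comment above works together with the ZSTD_minGain() check just below: an entropy-coded block is kept only when it undercuts the raw size by a sufficient margin, otherwise returning 0 tells the caller to emit a raw (or RLE) block instead. A reduced sketch of that decision, with the gain threshold passed in rather than derived from the strategy:

#include <stddef.h>

typedef enum { EX_block_raw, EX_block_compressed } EX_blockChoice_e;

static EX_blockChoice_e EX_chooseBlockFormat(size_t rawSize, size_t cSize, size_t minGain)
{
    if (cSize == 0) return EX_block_raw;                    /* compression declined */
    if (minGain >= rawSize || cSize >= rawSize - minGain)
        return EX_block_raw;                                /* not enough savings */
    return EX_block_compressed;
}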
*/ - if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) + if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) { + DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity); return 0; /* block not compressed */ + } FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); /* Check compressibility */ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } - DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); + DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); + /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly. + * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above. + */ + assert(cSize < ZSTD_BLOCKSIZE_MAX); return cSize; } @@ -2809,6 +3022,72 @@ void ZSTD_resetSeqStore(seqStore_t* ssPtr) ssPtr->longLengthType = ZSTD_llt_none; } +/* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. + * - Appends a block delimiter to outSeqs if one is not already present. + * See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. */ +static size_t ZSTD_postProcessSequenceProducerResult( + ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize +) { + RETURN_ERROR_IF( + nbExternalSeqs > outSeqsCapacity, + sequenceProducer_failed, + "External sequence producer returned error code %lu", + (unsigned long)nbExternalSeqs + ); + + RETURN_ERROR_IF( + nbExternalSeqs == 0 && srcSize > 0, + sequenceProducer_failed, + "Got zero sequences from external sequence producer for a non-empty src buffer!" + ); + + if (srcSize == 0) { + ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); + return 1; + } + + { + ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; + + /* We can return early if lastSeq is already a block delimiter. */ + if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { + return nbExternalSeqs; + } + + /* This error condition is only possible if the external matchfinder + * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ + RETURN_ERROR_IF( + nbExternalSeqs == outSeqsCapacity, + sequenceProducer_failed, + "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" + ); + + /* lastSeq is not a block delimiter, so we need to append one. */ + ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); + return nbExternalSeqs + 1; + } +} + +/* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. 
*/ +static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) { + size_t matchLenSum, litLenSum, i; + matchLenSum = 0; + litLenSum = 0; + for (i = 0; i < seqBufSize; i++) { + litLenSum += seqBuf[i].litLength; + matchLenSum += seqBuf[i].matchLength; + } + return litLenSum + matchLenSum; +} + typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) @@ -2818,7 +3097,9 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); - if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. We need to revisit and change this logic to be more consistent */ + if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); } else { @@ -2854,6 +3135,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); + + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + zc->appliedParams.useSequenceProducer, + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." + ); + /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, @@ -2865,6 +3155,14 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { rawSeqStore_t ldmSeqStore = kNullRawSeqStore; + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + zc->appliedParams.useSequenceProducer, + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." 
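The external sequence producer hook wired up in this hunk calls a user-supplied function (see the zc->externalMatchCtx.mFinder invocation just below) with the block's source, a ZSTD_Sequence output buffer and its capacity, the dictionary (currently always NULL/0), the compression level, and the window size; ZSTD_postProcessSequenceProducerResult() above then treats any return value larger than the capacity as failure and appends a block delimiter (offset == 0, matchLength == 0) when one is missing. Below is a deliberately trivial producer matching that call shape; the public typedef and registration function are not shown in this hunk, so the signature is an assumption mirrored from the call site:

#define ZSTD_STATIC_LINKING_ONLY   /* assumption: ZSTD_Sequence is declared by the matching zstd.h */
#include <zstd.h>

/* Declines to find matches: returns one block delimiter covering the whole
 * block as literals. Any value larger than outSeqsCapacity would be read as
 * a failure by the post-processing above. */
static size_t EX_noopSeqProducer(void* state,
                                 ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize,
                                 int compressionLevel, size_t windowSize)
{
    (void)state; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (outSeqsCapacity < 1) return outSeqsCapacity + 1;   /* signal failure */
    outSeqs[0].offset = 0;
    outSeqs[0].litLength = (unsigned)srcSize;   /* a block is at most ZSTD_BLOCKSIZE_MAX, fits easily */
    outSeqs[0].matchLength = 0;
    outSeqs[0].rep = 0;
    return 1;
}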
+ ); + ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; /* Updates ldmSeqStore.size */ @@ -2879,7 +3177,68 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->appliedParams.useRowMatchFinder, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); - } else { /* not long range mode */ + } else if (zc->appliedParams.useSequenceProducer) { + assert( + zc->externalMatchCtx.seqBufferCapacity >= ZSTD_sequenceBound(srcSize) + ); + assert(zc->externalMatchCtx.mFinder != NULL); + + { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; + + size_t const nbExternalSeqs = (zc->externalMatchCtx.mFinder)( + zc->externalMatchCtx.mState, + zc->externalMatchCtx.seqBuffer, + zc->externalMatchCtx.seqBufferCapacity, + src, srcSize, + NULL, 0, /* dict and dictSize, currently not supported */ + zc->appliedParams.compressionLevel, + windowSize + ); + + size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + zc->externalMatchCtx.seqBuffer, + nbExternalSeqs, + zc->externalMatchCtx.seqBufferCapacity, + srcSize + ); + + /* Return early if there is no error, since we don't need to worry about last literals */ + if (!ZSTD_isError(nbPostProcessedSeqs)) { + ZSTD_sequencePosition seqPos = {0,0,0}; + size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs); + RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); + FORWARD_IF_ERROR( + ZSTD_copySequencesToSeqStoreExplicitBlockDelim( + zc, &seqPos, + zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs, + src, srcSize, + zc->appliedParams.searchForExternalRepcodes + ), + "Failed to copy external sequences to seqStore!" + ); + ms->ldmSeqStore = NULL; + DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); + return ZSTDbss_compress; + } + + /* Propagate the error if fallback is disabled */ + if (!zc->appliedParams.enableMatchFinderFallback) { + return nbPostProcessedSeqs; + } + + /* Fallback to software matchfinder */ + { ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); + ms->ldmSeqStore = NULL; + DEBUGLOG( + 5, + "External sequence producer returned error code %lu. 
Falling back to internal parser.", + (unsigned long)nbExternalSeqs + ); + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } } + } else { /* not long range mode and no external matchfinder */ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->appliedParams.useRowMatchFinder, dictMode); @@ -2940,7 +3299,7 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode so we provide seqStoreSeqs[i].offset - 1 */ ZSTD_updateRep(updatedRepcodes.rep, - seqStoreSeqs[i].offBase - 1, + seqStoreSeqs[i].offBase, seqStoreSeqs[i].litLength == 0); literalsRead += outSeqs[i].litLength; } @@ -2956,6 +3315,10 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) zc->seqCollector.seqIndex += seqStoreSeqSize; } +size_t ZSTD_sequenceBound(size_t srcSize) { + return (srcSize / ZSTD_MINMATCH_MIN) + 1; +} + size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize) { @@ -3001,19 +3364,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) { const size_t unrollMask = unrollSize - 1; const size_t prefixLength = length & unrollMask; size_t i; - size_t u; if (length == 1) return 1; /* Check if prefix is RLE first before using unrolled loop */ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) { return 0; } for (i = prefixLength; i != length; i += unrollSize) { + size_t u; for (u = 0; u < unrollSize; u += sizeof(size_t)) { if (MEM_readST(ip + i + u) != valueST) { return 0; - } - } - } + } } } return 1; } @@ -3029,7 +3390,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore) return nbSeqs < 4 && nbLits < 10; } -static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) +static void +ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) { ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock; bs->prevCBlock = bs->nextCBlock; @@ -3037,7 +3399,9 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c } /* Writes the block header */ -static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) { +static void +writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) +{ U32 const cBlockHeader = cSize == 1 ? lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); @@ -3050,13 +3414,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB * Stores literals block type (raw, rle, compressed, repeat) and * huffman description table to hufMetadata. 
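ZSTD_sequenceBound(), added above, bounds the number of sequences any parse of srcSize bytes can produce (one per ZSTD_MINMATCH_MIN bytes, plus a terminator), which makes it the natural capacity for a ZSTD_generateSequences() output buffer. A usage sketch, assuming the matching zstd.h exposes both experimental declarations, with allocation handling abbreviated:

#define ZSTD_STATIC_LINKING_ONLY   /* assumption: experimental API is visible */
#include <stdlib.h>
#include <zstd.h>

static size_t EX_collectSequences(ZSTD_CCtx* cctx,
                                  const void* src, size_t srcSize,
                                  ZSTD_Sequence** outSeqs)
{
    size_t const maxNbSeq = ZSTD_sequenceBound(srcSize);
    *outSeqs = (ZSTD_Sequence*)malloc(maxNbSeq * sizeof(ZSTD_Sequence));
    if (*outSeqs == NULL) return (size_t)-1;   /* allocation failure, not a zstd error code */
    return ZSTD_generateSequences(cctx, *outSeqs, maxNbSeq, src, srcSize);
}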
* Requires ENTROPY_WORKSPACE_SIZE workspace - * @return : size of huffman description table or error code */ -static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, - const ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - const int literalsCompressionIsDisabled, - void* workspace, size_t wkspSize) + * @return : size of huffman description table, or an error code + */ +static size_t +ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + const int literalsCompressionIsDisabled, + void* workspace, size_t wkspSize, + int hufFlags) { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; @@ -3064,9 +3431,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi unsigned* const countWksp = (unsigned*)workspace; const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); BYTE* const nodeWksp = countWkspStart + countWkspSize; - const size_t nodeWkspSize = wkspEnd-nodeWksp; + const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp); unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; - unsigned huffLog = HUF_TABLELOG_DEFAULT; + unsigned huffLog = LitHufLog; HUF_repeat repeat = prevHuf->repeatMode; DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize); @@ -3081,73 +3448,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi /* small ? don't even attempt compression (speed opt) */ #ifndef COMPRESS_LITERALS_SIZE_MIN -#define COMPRESS_LITERALS_SIZE_MIN 63 +# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ #endif { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 
6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) { DEBUGLOG(5, "set_basic - too small"); hufMetadata->hType = set_basic; return 0; - } - } + } } /* Scan input and build symbol stats */ - { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); + { size_t const largest = + HIST_count_wksp (countWksp, &maxSymbolValue, + (const BYTE*)src, srcSize, + workspace, wkspSize); FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); if (largest == srcSize) { + /* only one literal symbol */ DEBUGLOG(5, "set_rle"); hufMetadata->hType = set_rle; return 0; } if (largest <= (srcSize >> 7)+4) { + /* heuristic: likely not compressible */ DEBUGLOG(5, "set_basic - no gain"); hufMetadata->hType = set_basic; return 0; - } - } + } } /* Validate the previous Huffman table */ - if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { + if (repeat == HUF_repeat_check + && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { repeat = HUF_repeat_none; } /* Build Huffman Tree */ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); + assert(huffLog <= LitHufLog); { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); huffLog = (U32)maxBits; - { /* Build and write the CTable */ - size_t const newCSize = HUF_estimateCompressedSize( - (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); - size_t const hSize = HUF_writeCTable_wksp( - hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), - (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, - nodeWksp, nodeWkspSize); - /* Check against repeating the previous CTable */ - if (repeat != HUF_repeat_none) { - size_t const oldCSize = HUF_estimateCompressedSize( - (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); - if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { - DEBUGLOG(5, "set_repeat - smaller"); - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - hufMetadata->hType = set_repeat; - return 0; - } - } - if (newCSize + hSize >= srcSize) { - DEBUGLOG(5, "set_basic - no gains"); + } + { /* Build and write the CTable */ + size_t const newCSize = HUF_estimateCompressedSize( + (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); + size_t const hSize = HUF_writeCTable_wksp( + hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), + (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, + nodeWksp, nodeWkspSize); + /* Check against repeating the previous CTable */ + if (repeat != HUF_repeat_none) { + size_t const oldCSize = HUF_estimateCompressedSize( + (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { + DEBUGLOG(5, "set_repeat - smaller"); ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - hufMetadata->hType = set_basic; + hufMetadata->hType = set_repeat; return 0; - } - DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); - hufMetadata->hType = set_compressed; - nextHuf->repeatMode = HUF_repeat_check; - return hSize; + } } + if (newCSize + hSize >= srcSize) { + DEBUGLOG(5, "set_basic - no gains"); + ZSTD_memcpy(nextHuf, prevHuf, 
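The literal-type pre-filter used above can be summarized as a standalone sketch (hypothetical enum and function names; the thresholds are the ones appearing in this hunk: 63 bytes, or 6 when a previous Huffman table can be reused, and the "largest count <= srcSize/128 + 4" compressibility test):

#include <stddef.h>

typedef enum { LIT_BASIC, LIT_RLE, LIT_TRY_COMPRESS } lit_mode_e;  /* hypothetical */

/* Decide the literals block type before spending time on a Huffman table:
 * - tiny inputs are stored raw
 * - a single repeated symbol becomes an RLE literals block
 * - a nearly flat histogram is judged incompressible and stored raw */
static lit_mode_e pickLiteralsMode(const size_t count[256], size_t srcSize, int prevTableValid)
{
    size_t const minLitSize = prevTableValid ? 6 : 63;
    size_t largest = 0;
    int s;
    if (srcSize <= minLitSize) return LIT_BASIC;
    for (s = 0; s < 256; s++) if (count[s] > largest) largest = count[s];
    if (largest == srcSize) return LIT_RLE;
    if (largest <= (srcSize >> 7) + 4) return LIT_BASIC;
    return LIT_TRY_COMPRESS;
}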
sizeof(*prevHuf)); + hufMetadata->hType = set_basic; + return 0; } + DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); + hufMetadata->hType = set_compressed; + nextHuf->repeatMode = HUF_repeat_check; + return hSize; } } @@ -3157,8 +3528,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi * and updates nextEntropy to the appropriate repeatMode. */ static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { - ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0}; +ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) +{ + ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0}; nextEntropy->litlength_repeatMode = FSE_repeat_none; nextEntropy->offcode_repeatMode = FSE_repeat_none; nextEntropy->matchlength_repeatMode = FSE_repeat_none; @@ -3169,16 +3541,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { * Builds entropy for the sequences. * Stores symbol compression modes and fse table to fseMetadata. * Requires ENTROPY_WORKSPACE_SIZE wksp. - * @return : size of fse tables or error code */ -static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, - const ZSTD_fseCTables_t* prevEntropy, - ZSTD_fseCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize) + * @return : size of fse tables or error code */ +static size_t +ZSTD_buildBlockEntropyStats_sequences( + const seqStore_t* seqStorePtr, + const ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize) { ZSTD_strategy const strategy = cctxParams->cParams.strategy; - size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); BYTE* const ostart = fseMetadata->fseTablesBuffer; BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); BYTE* op = ostart; @@ -3205,23 +3579,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, /** ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. * Requires workspace size ENTROPY_WORKSPACE_SIZE - * - * @return : 0 on success or error code + * @return : 0 on success, or an error code + * Note : also employed in superblock */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize) -{ - size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; +size_t ZSTD_buildBlockEntropyStats( + const seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize) +{ + size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); + int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); + int const hufFlags = huf_useOptDepth ? 
HUF_flags_optimalDepth : 0; + entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_literalsCompressionIsDisabled(cctxParams), - workspace, wkspSize); + workspace, wkspSize, hufFlags); + FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, @@ -3234,11 +3613,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, } /* Returns the size estimate for the literals section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, - const ZSTD_hufCTables_t* huf, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) +static size_t +ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, + const ZSTD_hufCTables_t* huf, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; @@ -3260,12 +3640,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz } /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ -static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, - const BYTE* codeTable, size_t nbSeq, unsigned maxCode, - const FSE_CTable* fseCTable, - const U8* additionalBits, - short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, - void* workspace, size_t wkspSize) +static size_t +ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, + const BYTE* codeTable, size_t nbSeq, unsigned maxCode, + const FSE_CTable* fseCTable, + const U8* additionalBits, + short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, + void* workspace, size_t wkspSize) { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; @@ -3297,99 +3678,107 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, } /* Returns the size estimate for the sequences section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) +static size_t +ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) { size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ); size_t cSeqSizeEstimate = 0; cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, - fseTables->offcodeCTable, NULL, - OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - workspace, wkspSize); + fseTables->offcodeCTable, NULL, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, - fseTables->litlengthCTable, LL_bits, - LL_defaultNorm, LL_defaultNormLog, MaxLL, - workspace, wkspSize); + 
fseTables->litlengthCTable, LL_bits, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, - fseTables->matchlengthCTable, ML_bits, - ML_defaultNorm, ML_defaultNormLog, MaxML, - workspace, wkspSize); + fseTables->matchlengthCTable, ML_bits, + ML_defaultNorm, ML_defaultNormLog, MaxML, + workspace, wkspSize); if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } /* Returns the size estimate for a given stream of literals, of, ll, ml */ -static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, - const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_entropyCTables_t* entropy, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize, - int writeLitEntropy, int writeSeqEntropy) { +static size_t +ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, + const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize, + int writeLitEntropy, int writeSeqEntropy) +{ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, - &entropy->huf, &entropyMetadata->hufMetadata, - workspace, wkspSize, writeLitEntropy); + &entropy->huf, &entropyMetadata->hufMetadata, + workspace, wkspSize, writeLitEntropy); size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, - nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, - workspace, wkspSize, writeSeqEntropy); + nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, + workspace, wkspSize, writeSeqEntropy); return seqSize + literalsSize + ZSTD_blockHeaderSize; } /* Builds entropy statistics and uses them for blocksize estimation. * - * Returns the estimated compressed size of the seqStore, or a zstd error. + * @return: estimated compressed size of the seqStore, or a zstd error. 
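The estimator above composes the block estimate from three parts: literals section + sequences section + 3-byte block header, where the sequences section header is the sequence count (1 to 3 bytes) plus the symbol-compression-modes byte. A minimal sketch under those assumptions (the LONGNBSEQ value is taken from zstd's internal headers and should be treated as an assumption here):

#include <stddef.h>

#define MY_LONGNBSEQ 0x7F00   /* assumed value of zstd's LONGNBSEQ */

/* Mirrors the `sequencesSectionHeaderSize` expression used by the estimator. */
static size_t seqSectionHeaderSize(size_t nbSeq)
{
    return 1                        /* first byte of the sequence count       */
         + (nbSeq >= 128)           /* second byte of the sequence count      */
         + (nbSeq >= MY_LONGNBSEQ)  /* third byte of the sequence count       */
         + 1;                       /* symbol compression modes byte          */
}

/* Whole-block estimate, same composition as ZSTD_estimateBlockSize(). */
static size_t estimateBlock(size_t litSectionSize, size_t seqSectionSize)
{
    return litSectionSize + seqSectionSize + 3 /* block header */;
}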
*/ -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { - ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; +static size_t +ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) +{ + ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); - return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), + zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), ""); + return ZSTD_estimateBlockSize( + seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (size_t)(seqStore->sequences - seqStore->sequencesStart), - &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, + &zc->blockState.nextCBlock->entropy, + entropyMetadata, + zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); } /* Returns literals bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) +{ size_t literalsBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; + seqDef const seq = seqStore->sequencesStart[i]; literalsBytes += seq.litLength; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { literalsBytes += 0x10000; - } - } + } } return literalsBytes; } /* Returns match bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) +{ size_t matchBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { seqDef seq = seqStore->sequencesStart[i]; matchBytes += seq.mlBase + MINMATCH; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { matchBytes += 0x10000; - } - } + } } return matchBytes; } @@ -3398,15 +3787,12 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { */ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, const seqStore_t* originalSeqStore, - size_t startIdx, size_t endIdx) { - BYTE* const litEnd = originalSeqStore->lit; - size_t literalsBytes; - size_t literalsBytesPreceding = 0; - + size_t startIdx, size_t endIdx) +{ *resultSeqStore = *originalSeqStore; if (startIdx > 0) { resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; - literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); } /* Move longLengthPos into the correct position if necessary */ @@ -3419,13 +3805,12 @@ static void 
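For the two counting helpers above: seqStore entries store litLength directly and the match length minus MINMATCH, with one optional "long length" escape that truncates the stored value by 0x10000, so reconstructing byte counts needs the same corrections the patch applies. A standalone sketch with a simplified stand-in for seqDef:

#include <stddef.h>
#include <stdint.h>

#define MY_MINMATCH 3   /* zstd's minimum match length */

typedef struct { uint32_t litLength; uint32_t mlBase; } mySeqDef;  /* simplified seqDef */

/* Total literal bytes represented by a run of sequences; `longLengthPos` marks the one
 * sequence (if any) whose stored litLength was truncated by 0x10000. */
static size_t countLiteralsBytes(const mySeqDef* seqs, size_t nbSeqs,
                                 size_t longLengthPos, int longLengthIsLiteral)
{
    size_t total = 0, i;
    for (i = 0; i < nbSeqs; ++i) {
        total += seqs[i].litLength;
        if (longLengthIsLiteral && i == longLengthPos) total += 0x10000;
    }
    return total;
}

/* Same idea for match bytes: the stored mlBase excludes MINMATCH. */
static size_t countMatchBytes(const mySeqDef* seqs, size_t nbSeqs,
                              size_t longLengthPos, int longLengthIsMatch)
{
    size_t total = 0, i;
    for (i = 0; i < nbSeqs; ++i) {
        total += seqs[i].mlBase + MY_MINMATCH;
        if (longLengthIsMatch && i == longLengthPos) total += 0x10000;
    }
    return total;
}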
ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, } resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; - literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); - resultSeqStore->litStart += literalsBytesPreceding; if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) { /* This accounts for possible last literals if the derived chunk reaches the end of the block */ - resultSeqStore->lit = litEnd; + assert(resultSeqStore->lit == originalSeqStore->lit); } else { - resultSeqStore->lit = resultSeqStore->litStart+literalsBytes; + size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; } resultSeqStore->llCode += startIdx; resultSeqStore->mlCode += startIdx; @@ -3433,20 +3818,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, } /** - * Returns the raw offset represented by the combination of offCode, ll0, and repcode history. - * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq(). + * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. + * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). */ static U32 -ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) -{ - U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */ - assert(STORED_IS_REPCODE(offCode)); - if (adjustedOffCode == ZSTD_REP_NUM) { - /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */ - assert(rep[0] > 0); +ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) +{ + U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */ + assert(OFFBASE_IS_REPCODE(offBase)); + if (adjustedRepCode == ZSTD_REP_NUM) { + assert(ll0); + /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 + * This is only valid if it results in a valid offset value, aka > 0. + * Note : it may happen that `rep[0]==1` in exceptional circumstances. + * In which case this function will return 0, which is an invalid offset. + * It's not an issue though, since this value will be + * compared and discarded within ZSTD_seqStore_resolveOffCodes(). 
+ */ return rep[0] - 1; } - return rep[adjustedOffCode]; + return rep[adjustedRepCode]; } /** @@ -3462,30 +3853,32 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c * 1-3 : repcode 1-3 * 4+ : real_offset+3 */ -static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, - seqStore_t* const seqStore, U32 const nbSeq) { +static void +ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, + const seqStore_t* const seqStore, U32 const nbSeq) +{ U32 idx = 0; for (; idx < nbSeq; ++idx) { seqDef* const seq = seqStore->sequencesStart + idx; U32 const ll0 = (seq->litLength == 0); - U32 const offCode = OFFBASE_TO_STORED(seq->offBase); - assert(seq->offBase > 0); - if (STORED_IS_REPCODE(offCode)) { - U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0); - U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0); + U32 const offBase = seq->offBase; + assert(offBase > 0); + if (OFFBASE_IS_REPCODE(offBase)) { + U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); + U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); /* Adjust simulated decompression repcode history if we come across a mismatch. Replace * the repcode with the offset it actually references, determined by the compression * repcode history. */ if (dRawOffset != cRawOffset) { - seq->offBase = cRawOffset + ZSTD_REP_NUM; + seq->offBase = OFFSET_TO_OFFBASE(cRawOffset); } } /* Compression repcode history is always updated with values directly from the unmodified seqStore. * Decompression repcode history may use modified seq->offset value taken from compression repcode history. */ - ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0); - ZSTD_updateRep(cRepcodes->rep, offCode, ll0); + ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); + ZSTD_updateRep(cRepcodes->rep, offBase, ll0); } } @@ -3495,10 +3888,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ * Returns the total size of that block (including header) or a ZSTD error code. */ static size_t -ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, + const seqStore_t* const seqStore, repcodes_t* const dRep, repcodes_t* const cRep, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, + const void* src, size_t srcSize, U32 lastBlock, U32 isPartition) { const U32 rleMaxLength = 25; @@ -3572,45 +3966,49 @@ typedef struct { /* Helper function to perform the recursive search for block splits. * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. - * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then - * we do not recurse. + * If advantageous to split, then we recurse down the two sub-blocks. + * If not, or if an error occurred in estimation, then we do not recurse. * - * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING. + * Note: The recursion depth is capped by a heuristic minimum number of sequences, + * defined by MIN_SEQUENCES_BLOCK_SPLITTING. * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). * In practice, recursion depth usually doesn't go beyond 4. 
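The repcode resolution above follows zstd's usual convention: with a non-empty literal run, repcodes 1..3 map to rep[0..2]; with litLength == 0 they shift to rep[1], rep[2], rep[0]-1. A minimal sketch of that mapping (hypothetical name, not the patched function):

#include <stdint.h>

#define MY_REP_NUM 3   /* zstd keeps a history of 3 repeated offsets */

/* ll>0 : repcode 1,2,3 -> rep[0], rep[1], rep[2]
 * ll==0: repcode 1,2,3 -> rep[1], rep[2], rep[0]-1
 * The rep[0]-1 case can yield 0 when rep[0]==1; callers compare and discard it. */
static uint32_t resolveRepcode(const uint32_t rep[MY_REP_NUM], uint32_t repcode /*1..3*/, uint32_t ll0)
{
    uint32_t const idx = repcode - 1 + ll0;   /* [0..3] */
    if (idx == MY_REP_NUM) return rep[0] - 1;
    return rep[idx];
}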
* - * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. + * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize * maximum of 128 KB, this value is actually impossible to reach. */ static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, ZSTD_CCtx* zc, const seqStore_t* origSeqStore) { - seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; - seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; - seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; + seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; size_t estimatedOriginalSize; size_t estimatedFirstHalfSize; size_t estimatedSecondHalfSize; size_t midIdx = (startIdx + endIdx)/2; + DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); + assert(endIdx >= startIdx); if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { - DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences"); + DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx); return; } - DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); - DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", + DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { return; } if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { + DEBUGLOG(5, "split decided at seqNb:%zu", midIdx); ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); splits->splitLocations[splits->idx] = (U32)midIdx; splits->idx++; @@ -3618,14 +4016,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end } } -/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio. +/* Base recursive function. + * Populates a table with intra-block partition indices that can improve compression ratio. * - * Returns the number of splits made (which equals the size of the partition table - 1). + * @return: number of splits made (which equals the size of the partition table - 1). 
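At a high level, the recursive split search above halves a range of sequences whenever the two halves are estimated to compress smaller than the whole, bounded by a minimum chunk size and a maximum number of splits. A sketch under stated assumptions (the minimum-sequence constant is a hypothetical stand-in; 196 is the ZSTD_MAX_NB_BLOCK_SPLITS value quoted in the comment):

#include <stddef.h>

#define MY_MIN_SEQUENCES_SPLIT 300   /* assumed stand-in for MIN_SEQUENCES_BLOCK_SPLITTING */
#define MY_MAX_SPLITS          196   /* per the ZSTD_MAX_NB_BLOCK_SPLITS comment above */

/* Hypothetical cost model: estimated compressed size of sequences [startIdx, endIdx). */
typedef size_t (*estimateFn)(void* ctx, size_t startIdx, size_t endIdx);

/* Record split points, in order, wherever splitting is estimated to help. */
static void deriveSplits(size_t startIdx, size_t endIdx,
                         estimateFn estimate, void* ctx,
                         size_t* splitLocations, size_t* nbSplits)
{
    size_t const midIdx = (startIdx + endIdx) / 2;
    if (endIdx - startIdx < MY_MIN_SEQUENCES_SPLIT || *nbSplits >= MY_MAX_SPLITS) return;
    {   size_t const whole  = estimate(ctx, startIdx, endIdx);
        size_t const first  = estimate(ctx, startIdx, midIdx);
        size_t const second = estimate(ctx, midIdx, endIdx);
        if (first + second < whole) {
            deriveSplits(startIdx, midIdx, estimate, ctx, splitLocations, nbSplits);
            splitLocations[(*nbSplits)++] = midIdx;
            deriveSplits(midIdx, endIdx, estimate, ctx, splitLocations, nbSplits);
        }
    }
}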
*/ -static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) { - seqStoreSplits splits = {partitions, 0}; +static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) +{ + seqStoreSplits splits; + splits.splitLocations = partitions; + splits.idx = 0; if (nbSeq <= 4) { - DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split"); + DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq); /* Refuse to try and split anything with less than 4 sequences */ return 0; } @@ -3641,18 +4043,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) * Returns combined size of all blocks (which includes headers), or a ZSTD error code. */ static size_t -ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, - const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) +ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t blockSize, + U32 lastBlock, U32 nbSeq) { size_t cSize = 0; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; size_t i = 0; size_t srcBytesTotal = 0; - U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ - seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; - seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; - size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); + U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; + size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history * may become invalid. 
In order to reconcile potentially invalid repcodes, we keep track of two @@ -3674,30 +4078,31 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate); if (numSplits == 0) { - size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, - &dRep, &cRep, - op, dstCapacity, - ip, blockSize, - lastBlock, 0 /* isPartition */); + size_t cSizeSingleBlock = + ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, + &dRep, &cRep, + op, dstCapacity, + ip, blockSize, + lastBlock, 0 /* isPartition */); FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!"); DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); - assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); + assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX); + assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize); return cSizeSingleBlock; } ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); for (i = 0; i <= numSplits; ++i) { - size_t srcBytes; size_t cSizeChunk; U32 const lastPartition = (i == numSplits); U32 lastBlockEntireSrc = 0; - srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); + size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); srcBytesTotal += srcBytes; if (lastPartition) { /* This is the final partition, need to account for possible last literals */ @@ -3712,7 +4117,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1 /* isPartition */); - DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); + DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size", + ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); ip += srcBytes; @@ -3720,10 +4126,10 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac dstCapacity -= cSizeChunk; cSize += cSizeChunk; *currSeqStore = *nextSeqStore; - assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); + assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize); } - /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes - * for the next block. + /* cRep and dRep may have diverged during the compression. + * If so, we use the dRep repcodes for the next block. 
*/ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); return cSize; @@ -3734,8 +4140,6 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; U32 nbSeq; size_t cSize; DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); @@ -3746,7 +4150,7 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, if (bss == ZSTDbss_noCompress) { if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); return cSize; @@ -3764,9 +4168,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 frame) { - /* This the upper bound for the length of an rle block. - * This isn't the actual upper bound. Finding the real threshold - * needs further investigation. + /* This is an estimated upper bound for the length of an rle block. + * This isn't the actual upper bound. + * Finding the real threshold needs further investigation. */ const U32 rleMaxLength = 25; size_t cSize; @@ -3858,10 +4262,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, * * cSize >= blockBound(srcSize): We have expanded the block too much so * emit an uncompressed block. */ - { - size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); + { size_t const cSize = + ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); if (cSize != ERROR(dstSize_tooSmall)) { - size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + size_t const maxCSize = + srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); @@ -3869,7 +4274,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, } } } - } + } /* if (bss == ZSTDbss_compress)*/ DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); /* Superblock compression failed, attempt to emit a single no compress block. @@ -3927,7 +4332,7 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. * Frame is supposed already started (header already produced) -* @return : compressed size, or an error code +* @return : compressed size, or an error code */ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, @@ -3951,7 +4356,9 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, ZSTD_matchState_t* const ms = &cctx->blockState.matchState; U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. 
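The targetCBlockSize path above only keeps a superblock-compressed result if it beats storing the block raw by a minimal margin. A sketch of that acceptance test; the minGain formula used here ((srcSize >> 6) + 2) is an assumed simplification standing in for zstd's strategy-dependent ZSTD_minGain():

#include <stddef.h>

/* Keep the compressed result only if it saves at least `minGain` bytes over raw storage. */
static int acceptCompressedBlock(size_t cSize, size_t srcSize)
{
    size_t const blockHeaderSize = 3;
    size_t const minGain  = (srcSize >> 6) + 2;   /* assumed simplification */
    size_t const maxCSize = srcSize - minGain;
    return (cSize != 0) && (cSize < maxCSize + blockHeaderSize);
}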
We need to revisit and change this logic to be more consistent */ + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1, dstSize_tooSmall, "not enough space to store compressed block"); if (remaining < blockSize) blockSize = remaining; @@ -3990,7 +4397,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } - } + } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/ ip += blockSize; @@ -4182,7 +4589,7 @@ size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); - return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); + return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog); } size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) @@ -4202,31 +4609,47 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* src, size_t srcSize, - ZSTD_dictTableLoadMethod_e dtlm) + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; - /* Assert that we the ms params match the params we're being given */ + /* Assert that the ms params match the params we're being given */ ZSTD_assertEqualCParams(params->cParams, ms->cParams); - if (srcSize > ZSTD_CHUNKSIZE_MAX) { + { /* Ensure large dictionaries can't cause index overflow */ + /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. * Dictionaries right at the edge will immediately trigger overflow * correction, but I don't want to insert extra constraints here. */ - U32 const maxDictSize = ZSTD_CURRENT_MAX - 1; - /* We must have cleared our windows when our source is this large. */ - assert(ZSTD_window_isEmpty(ms->window)); - if (loadLdmDict) - assert(ZSTD_window_isEmpty(ls->window)); + U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX; + + int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(¶ms->cParams); + if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) { + /* Some dictionary matchfinders in zstd use "short cache", + * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each + * CDict hashtable entry as a tag rather than as part of an index. + * When short cache is used, we need to truncate the dictionary + * so that its indices don't overlap with the tag. */ + U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX; + maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize); + assert(!loadLdmDict); + } + /* If the dictionary is too large, only load the suffix of the dictionary. */ if (srcSize > maxDictSize) { ip = iend - maxDictSize; src = ip; srcSize = maxDictSize; - } + } } + + if (srcSize > ZSTD_CHUNKSIZE_MAX) { + /* We must have cleared our windows when our source is this large. 
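The short-cache change above caps the loadable dictionary size so that table indices never collide with the tag bits stored in the low bits of CDict hash-table entries, then loads only the dictionary suffix that fits. A sketch with the relevant constants passed in as parameters rather than asserted (hypothetical names, not this patch's functions):

#include <stddef.h>
#include <stdint.h>

/* Cap on dictionary size: the global index cap minus the window start index, further
 * reduced when short-cache tagging steals the top bits of the 32-bit index space. */
static size_t maxLoadableDictSize(size_t currentMax, unsigned windowStartIndex,
                                  int usesShortCacheTags, unsigned shortCacheTagBits)
{
    size_t maxDictSize = currentMax - windowStartIndex;
    if (usesShortCacheTags) {
        size_t const shortCacheMax = ((size_t)1 << (32 - shortCacheTagBits)) - windowStartIndex;
        if (shortCacheMax < maxDictSize) maxDictSize = shortCacheMax;
    }
    return maxDictSize;
}

/* If the dictionary is too large, keep only its suffix, as the patched loader does. */
static void clampDict(const uint8_t** dictStart, size_t* dictSize, size_t maxDictSize)
{
    if (*dictSize > maxDictSize) {
        *dictStart += *dictSize - maxDictSize;
        *dictSize   = maxDictSize;
    }
}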
*/ + assert(ZSTD_window_isEmpty(ms->window)); + if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window)); } DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); @@ -4249,10 +4672,10 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, switch(params->cParams.strategy) { case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, dtlm); + ZSTD_fillHashTable(ms, iend, dtlm, tfp); break; case ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, dtlm); + ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); break; case ZSTD_greedy: @@ -4418,6 +4841,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, void* workspace) { const BYTE* dictPtr = (const BYTE*)dict; @@ -4436,7 +4860,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( - ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), ""); + ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), ""); } return dictID; } @@ -4452,6 +4876,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, void* workspace) { DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); @@ -4464,13 +4889,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, /* dict restricted modes */ if (dictContentType == ZSTD_dct_rawContent) - return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm); + return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp); if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { if (dictContentType == ZSTD_dct_auto) { DEBUGLOG(4, "raw content dictionary detected"); return ZSTD_loadDictionaryContent( - ms, ls, ws, params, dict, dictSize, dtlm); + ms, ls, ws, params, dict, dictSize, dtlm, tfp); } RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); assert(0); /* impossible */ @@ -4478,13 +4903,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, /* dict as full zstd dictionary */ return ZSTD_loadZstdDictionary( - bs, ms, ws, params, dict, dictSize, dtlm, workspace); + bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace); } #define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) #define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL) /*! 
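For the dictionary dispatch above: a buffer is loaded as a structured zstd dictionary only when it starts with the dictionary magic number; otherwise ZSTD_dct_auto silently falls back to raw content while an explicit ZSTD_dct_fullDict request fails. A simplified standalone sketch (local enums mirror the real dictContentType values; the small-size guard is illustrative):

#include <stddef.h>
#include <stdint.h>

#define MY_MAGIC_DICTIONARY 0xEC30A437U   /* ZSTD_MAGIC_DICTIONARY, from zstd.h */

typedef enum { DCT_AUTO, DCT_RAW, DCT_FULL } dictContentType_e;          /* hypothetical mirror */
typedef enum { LOAD_RAW_CONTENT, LOAD_ZSTD_DICT, LOAD_ERROR } dictDecision_e;

static dictDecision_e classifyDict(const uint8_t* dict, size_t dictSize, dictContentType_e type)
{
    uint32_t magic;
    if (dict == NULL || dictSize < 4) return LOAD_RAW_CONTENT;   /* illustrative guard */
    if (type == DCT_RAW) return LOAD_RAW_CONTENT;
    magic = (uint32_t)dict[0] | ((uint32_t)dict[1] << 8)
          | ((uint32_t)dict[2] << 16) | ((uint32_t)dict[3] << 24);   /* little-endian read */
    if (magic != MY_MAGIC_DICTIONARY)
        return (type == DCT_AUTO) ? LOAD_RAW_CONTENT : LOAD_ERROR;
    return LOAD_ZSTD_DICT;
}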
ZSTD_compressBegin_internal() : + * Assumption : either @dict OR @cdict (or none) is non-NULL, never both * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, @@ -4520,11 +4946,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, cdict->dictContentType, dtlm, - cctx->entropyWorkspace) + ZSTD_tfp_forCCtx, cctx->entropyWorkspace) : ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, - dictContentType, dtlm, cctx->entropyWorkspace); + dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= UINT_MAX); cctx->dictID = (U32)dictID; @@ -4565,11 +4991,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, &cctxParams, pledgedSrcSize); } -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +size_t +ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_CCtx_params cctxParams; - { - ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); + { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel); } DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); @@ -4828,7 +5254,7 @@ static size_t ZSTD_initCDict_internal( { size_t const dictID = ZSTD_compress_insertDictionary( &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, ¶ms, cdict->dictContent, cdict->dictContentSize, - dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace); + dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= (size_t)(U32)-1); cdict->dictID = (U32)dictID; @@ -5316,30 +5742,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) { - size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; - if (hintInSize==0) hintInSize = cctx->blockSize; - return hintInSize; + if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { + return cctx->blockSize - cctx->stableIn_notConsumed; + } + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); + { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; + if (hintInSize==0) hintInSize = cctx->blockSize; + return hintInSize; + } } /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants - * non-static, because can be called from zstdmt_compress.c - * @return : hint size for next input */ + * @return : hint size for next input to complete ongoing block */ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode) { - const char* const istart = (const char*)input->src; - const char* const iend = input->size != 0 ? istart + input->size : istart; - const char* ip = input->pos != 0 ? 
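The reworked ZSTD_nextInputSizeHint() above distinguishes the two input-buffer modes: in stable mode the hint is whatever is still missing to complete one block (accounting for bytes already stashed), in buffered mode it is the free space up to the internal buffer target. A sketch over a reduced, hypothetical state struct:

#include <stddef.h>

typedef struct {
    int    stableInputMode;        /* ZSTD_bm_stable vs ZSTD_bm_buffered, reduced to a flag */
    size_t blockSize;
    size_t stableIn_notConsumed;   /* bytes accepted but not yet compressed (stable mode)  */
    size_t inBuffTarget, inBuffPos;
} hintState;

static size_t nextInputSizeHint(const hintState* s)
{
    if (s->stableInputMode) return s->blockSize - s->stableIn_notConsumed;
    {   size_t const hint = s->inBuffTarget - s->inBuffPos;
        return hint ? hint : s->blockSize;
    }
}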
istart + input->pos : istart; - char* const ostart = (char*)output->dst; - char* const oend = output->size != 0 ? ostart + output->size : ostart; - char* op = output->pos != 0 ? ostart + output->pos : ostart; + const char* const istart = (assert(input != NULL), (const char*)input->src); + const char* const iend = (istart != NULL) ? istart + input->size : istart; + const char* ip = (istart != NULL) ? istart + input->pos : istart; + char* const ostart = (assert(output != NULL), (char*)output->dst); + char* const oend = (ostart != NULL) ? ostart + output->size : ostart; + char* op = (ostart != NULL) ? ostart + output->pos : ostart; U32 someMoreWork = 1; /* check expectations */ - DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); + DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos); + assert(zcs != NULL); + if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { + assert(input->pos >= zcs->stableIn_notConsumed); + input->pos -= zcs->stableIn_notConsumed; + ip -= zcs->stableIn_notConsumed; + zcs->stableIn_notConsumed = 0; + } if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { assert(zcs->inBuff != NULL); assert(zcs->inBuffSize > 0); @@ -5348,8 +5785,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, assert(zcs->outBuff != NULL); assert(zcs->outBuffSize > 0); } - assert(output->pos <= output->size); + if (input->src == NULL) assert(input->size == 0); assert(input->pos <= input->size); + if (output->dst == NULL) assert(output->size == 0); + assert(output->pos <= output->size); assert((U32)flushMode <= (U32)ZSTD_e_end); while (someMoreWork) { @@ -5381,8 +5820,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; - if (loaded != 0) - ip += loaded; + if (ip) ip += loaded; if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ @@ -5393,6 +5831,20 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, /* empty */ someMoreWork = 0; break; } + } else { + assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + if ( (flushMode == ZSTD_e_continue) + && ( (size_t)(iend - ip) < zcs->blockSize) ) { + /* can't compress a full block : stop here */ + zcs->stableIn_notConsumed = (size_t)(iend - ip); + ip = iend; /* pretend to have consumed input */ + someMoreWork = 0; break; + } + if ( (flushMode == ZSTD_e_flush) + && (ip == iend) ) { + /* empty */ + someMoreWork = 0; break; + } } /* compress current block (note : this stage cannot be stopped in the middle) */ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); @@ -5400,9 +5852,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, void* cDst; size_t cSize; size_t oSize = oend-op; - size_t const iSize = inputBuffered - ? zcs->inBuffPos - zcs->inToCompress - : MIN((size_t)(iend - ip), zcs->blockSize); + size_t const iSize = inputBuffered ? 
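A rough model of the stableIn_notConsumed bookkeeping introduced above: because the caller guarantees the input buffer stays valid and in place (ZSTD_c_stableInBuffer), the context may report a partial block as consumed without compressing it, remember how much it skipped, and rewind by that amount on the next call once a full block is available. This is only an illustrative sketch of the idea, not the patched control flow:

#include <stddef.h>

typedef struct {
    size_t blockSize;
    size_t stableIn_notConsumed;   /* bytes reported as consumed but not yet compressed */
} stableInState;

/* When more input is coming and less than a block is available, stash it and report
 * it as consumed so the caller sees forward progress. Returns the bytes stashed. */
static size_t stableIn_continue(stableInState* s, size_t inputRemaining, int moreInputComing)
{
    if (moreInputComing && inputRemaining < s->blockSize) {
        s->stableIn_notConsumed += inputRemaining;
        return inputRemaining;
    }
    return 0;   /* enough data (or a flush): compress now, nothing stashed */
}

/* On the next call, stashed bytes become visible again before compressing. */
static size_t stableIn_rewind(stableInState* s, size_t reportedPos)
{
    size_t const resumePos = reportedPos - s->stableIn_notConsumed;
    s->stableIn_notConsumed = 0;
    return resumePos;
}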
zcs->inBuffPos - zcs->inToCompress + : MIN((size_t)(iend - ip), zcs->blockSize); if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) cDst = op; /* compress into output buffer, to skip flush stage */ else @@ -5425,19 +5876,16 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; - } else { - unsigned const lastBlock = (ip + iSize == iend); - assert(flushMode == ZSTD_e_end /* Already validated */); + } else { /* !inputBuffered, hence ZSTD_bm_stable */ + unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend); cSize = lastBlock ? ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) : ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize); /* Consume the input prior to error checking to mirror buffered mode. */ - if (iSize > 0) - ip += iSize; + if (ip) ip += iSize; FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; - if (lastBlock) - assert(ip == iend); + if (lastBlock) assert(ip == iend); } if (cDst == op) { /* no need to flush */ op += cSize; @@ -5513,8 +5961,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf /* After a compression call set the expected input/output buffer. * This is validated at the start of the next compression call. */ -static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input) +static void +ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input) { + DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)"); if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { cctx->expectedInBuffer = *input; } @@ -5533,22 +5983,22 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, { if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { ZSTD_inBuffer const expect = cctx->expectedInBuffer; - if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size) - RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!"); - if (endOp != ZSTD_e_end) - RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!"); + if (expect.src != input->src || expect.pos != input->pos) + RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!"); } + (void)endOp; if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { size_t const outBufferSize = output->size - output->pos; if (cctx->expectedOutBufferSize != outBufferSize) - RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!"); + RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!"); } return 0; } static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, ZSTD_EndDirective endOp, - size_t inSize) { + size_t inSize) +{ ZSTD_CCtx_params params = cctx->requestedParams; ZSTD_prefixDict const prefixDict = cctx->prefixDict; FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. 
*/ @@ -5562,9 +6012,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, params.compressionLevel = cctx->cdict->compressionLevel; } DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); - if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */ - { - size_t const dictSize = prefixDict.dict + if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ + + { size_t const dictSize = prefixDict.dict ? prefixDict.dictSize : (cctx->cdict ? cctx->cdict->dictContentSize : 0); ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); @@ -5576,8 +6026,18 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); + params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); + params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize); + params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel); #ifdef ZSTD_MULTITHREAD + /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */ + RETURN_ERROR_IF( + params.useSequenceProducer == 1 && params.nbWorkers >= 1, + parameter_combination_unsupported, + "External sequence producer isn't supported with nbWorkers >= 1" + ); + if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ } @@ -5605,7 +6065,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, cctx->streamStage = zcss_load; cctx->appliedParams = params; } else -#endif +#endif /* ZSTD_MULTITHREAD */ { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, @@ -5631,6 +6091,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, return 0; } +/* @return provides a minimum amount of data remaining to be flushed from internal buffers + */ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, @@ -5645,8 +6107,27 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, /* transparent initialization stage */ if (cctx->streamStage == zcss_init) { - FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed"); - ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ + size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */ + size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed; + if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */ + && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */ + && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */ + if (cctx->stableIn_notConsumed) { /* not the first time */ + /* check stable source guarantees */ + RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer"); + 
RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos"); + } + /* pretend input was consumed, to give a sense forward progress */ + input->pos = input->size; + /* save stable inBuffer, for later control, and flush/end */ + cctx->expectedInBuffer = *input; + /* but actually input wasn't consumed, so keep track of position from where compression shall resume */ + cctx->stableIn_notConsumed += inputSize; + /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */ + return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */ + } + FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed"); + ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ } /* end of transparent initialization stage */ @@ -5659,6 +6140,13 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); cctx->cParamsChanged = 0; } + if (cctx->stableIn_notConsumed) { + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_stable); + /* some early data was skipped - make it available for consumption */ + assert(input->pos >= cctx->stableIn_notConsumed); + input->pos -= cctx->stableIn_notConsumed; + cctx->stableIn_notConsumed = 0; + } for (;;) { size_t const ipos = input->pos; size_t const opos = output->pos; @@ -5697,7 +6185,7 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_setBufferExpectations(cctx, output, input); return flushMin; } -#endif +#endif /* ZSTD_MULTITHREAD */ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); DEBUGLOG(5, "completed ZSTD_compressStream2"); ZSTD_setBufferExpectations(cctx, output, input); @@ -5710,13 +6198,20 @@ size_t ZSTD_compressStream2_simpleArgs ( const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } size_t ZSTD_compress2(ZSTD_CCtx* cctx, @@ -5739,6 +6234,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, /* Reset to the original values. 
*/ cctx->requestedParams.inBufferMode = originalInBufferMode; cctx->requestedParams.outBufferMode = originalOutBufferMode; + FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); if (result != 0) { /* compression not completed, due to lack of output space */ assert(oPos == dstCapacity); @@ -5749,64 +6245,61 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, } } -typedef struct { - U32 idx; /* Index in array of ZSTD_Sequence */ - U32 posInSequence; /* Position within sequence at idx */ - size_t posInSrc; /* Number of bytes given by sequences provided so far */ -} ZSTD_sequencePosition; - /* ZSTD_validateSequence() : * @offCode : is presumed to follow format required by ZSTD_storeSeq() * @returns a ZSTD error code if sequence is not valid */ static size_t -ZSTD_validateSequence(U32 offCode, U32 matchLength, - size_t posInSrc, U32 windowLog, size_t dictSize) +ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch, + size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) { - U32 const windowSize = 1 << windowLog; - /* posInSrc represents the amount of data the the decoder would decode up to this point. + U32 const windowSize = 1u << windowLog; + /* posInSrc represents the amount of data the decoder would decode up to this point. * As long as the amount of data decoded is less than or equal to window size, offsets may be * larger than the total length of output decoded in order to reference the dict, even larger than * window size. After output surpasses windowSize, we're limited to windowSize offsets again. */ size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; - RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!"); - RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small"); + size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; + RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); + /* Validate maxNbSeq is large enough for the given matchLength and minMatch */ + RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); return 0; } /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ -static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) +static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) { - U32 offCode = STORE_OFFSET(rawOffset); + U32 offBase = OFFSET_TO_OFFBASE(rawOffset); if (!ll0 && rawOffset == rep[0]) { - offCode = STORE_REPCODE_1; + offBase = REPCODE1_TO_OFFBASE; } else if (rawOffset == rep[1]) { - offCode = STORE_REPCODE(2 - ll0); + offBase = REPCODE_TO_OFFBASE(2 - ll0); } else if (rawOffset == rep[2]) { - offCode = STORE_REPCODE(3 - ll0); + offBase = REPCODE_TO_OFFBASE(3 - ll0); } else if (ll0 && rawOffset == rep[0] - 1) { - offCode = STORE_REPCODE_3; + offBase = REPCODE3_TO_OFFBASE; } - return offCode; + return offBase; } -/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of - * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. 
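The renamed ZSTD_finalizeOffBase() above uses the offBase convention documented earlier in this patch: values 1..3 denote repcodes 1..3, and values of 4 and above denote raw_offset + 3. A standalone sketch of that mapping (hypothetical name):

#include <stdint.h>

#define MY_REP_NUM 3

/* Convert a raw offset to its offBase representation, preferring a repcode when the
 * offset matches the current history (with the litLength==0 shift, including rep[0]-1). */
static uint32_t finalizeOffBase(uint32_t rawOffset, const uint32_t rep[MY_REP_NUM], uint32_t ll0)
{
    uint32_t offBase = rawOffset + MY_REP_NUM;             /* default: OFFSET_TO_OFFBASE   */
    if (!ll0 && rawOffset == rep[0])          offBase = 1;  /* repcode 1                    */
    else if (rawOffset == rep[1])             offBase = 2 - ll0;
    else if (rawOffset == rep[2])             offBase = 3 - ll0;
    else if (ll0 && rawOffset == rep[0] - 1)  offBase = 3;  /* repcode 3: rep[0]-1 when ll==0 */
    return offBase;
}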
- */ -static size_t +size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) + const void* src, size_t blockSize, + ZSTD_paramSwitch_e externalRepSearch) { U32 idx = seqPos->idx; + U32 const startIdx = idx; BYTE const* ip = (BYTE const*)(src); const BYTE* const iend = ip + blockSize; repcodes_t updatedRepcodes; U32 dictSize; + DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize); + if (cctx->cdict) { dictSize = (U32)cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { @@ -5815,25 +6308,55 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, dictSize = 0; } ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); - for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) { + for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { U32 const litLength = inSeqs[idx].litLength; - U32 const ll0 = (litLength == 0); U32 const matchLength = inSeqs[idx].matchLength; - U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); + U32 offBase; + + if (externalRepSearch == ZSTD_ps_disable) { + offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); + } else { + U32 const ll0 = (litLength == 0); + offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } - DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer), "Sequence validation failed"); } - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; } + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + assert(externalRepSearch != ZSTD_ps_auto); + assert(idx >= startIdx); + if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { + U32* const rep = updatedRepcodes.rep; + U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ + + if (lastSeqIdx >= startIdx + 2) { + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else if (lastSeqIdx == startIdx + 1) { + rep[2] = rep[0]; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else { + assert(lastSeqIdx == startIdx); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[lastSeqIdx].offset; + } + } + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); if (inSeqs[idx].litLength) { @@ -5842,26 +6365,15 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ip += inSeqs[idx].litLength; seqPos->posInSrc += inSeqs[idx].litLength; } - RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!"); + RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); seqPos->idx = idx+1; return 0; } -/* Returns the number of bytes to move the current read position back by. Only non-zero - * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something - * went wrong. - * - * This function will attempt to scan through blockSize bytes represented by the sequences - * in inSeqs, storing any (partial) sequences. - * - * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to - * avoid splitting a match, or to avoid splitting a match such that it would produce a match - * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. 
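The reconciliation step above only runs when repcode search was skipped during parsing; a worked illustration with invented offsets:

/* Illustration: externalRepSearch == ZSTD_ps_disable and the block stored four
 * sequences with raw offsets {1024, 512, 2048, 64} (64 belongs to lastSeqIdx).
 * Since lastSeqIdx >= startIdx + 2, the next block starts with:
 *   rep[0] = 64     (most recent offset)
 *   rep[1] = 2048
 *   rep[2] = 512
 * With only one or two sequences in the block, the remaining slots are filled
 * by shifting down the previous block's history, as the else branches show. */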
- */ -static size_t +size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) { U32 idx = seqPos->idx; U32 startPosInSequence = seqPos->posInSequence; @@ -5873,6 +6385,9 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* U32 bytesAdjustment = 0; U32 finalMatchSplit = 0; + /* TODO(embg) support fast parsing mode in noBlockDelim mode */ + (void)externalRepSearch; + if (cctx->cdict) { dictSize = cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { @@ -5880,7 +6395,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* } else { dictSize = 0; } - DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); + DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { @@ -5888,7 +6403,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* U32 litLength = currSeq.litLength; U32 matchLength = currSeq.matchLength; U32 const rawOffset = currSeq.offset; - U32 offCode; + U32 offBase; /* Modify the sequence depending on where endPosInSequence lies */ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { @@ -5902,7 +6417,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* /* Move to the next sequence */ endPosInSequence -= currSeq.litLength + currSeq.matchLength; startPosInSequence = 0; - idx++; } else { /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence does not reach the end of the match. So, we have to split the sequence */ @@ -5942,21 +6456,23 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* } /* Check if this offset can be represented with a repcode */ { U32 const ll0 = (litLength == 0); - offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); + offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer), "Sequence validation failed"); } - DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; + if (!finalMatchSplit) + idx++; /* Next Sequence */ } DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); @@ -5979,7 +6495,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize); + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) { ZSTD_sequenceCopier sequenceCopier = NULL; @@ -5993,6 +6509,57 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) return sequenceCopier; } +/* Discover the size of next block by searching for the delimiter. + * Note that a block delimiter **must** exist in this mode, + * otherwise it's an input error. + * The block size retrieved will be later compared to ensure it remains within bounds */ +static size_t +blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) +{ + int end = 0; + size_t blockSize = 0; + size_t spos = seqPos.idx; + DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize); + assert(spos <= inSeqsSize); + while (spos < inSeqsSize) { + end = (inSeqs[spos].offset == 0); + blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; + if (end) { + if (inSeqs[spos].matchLength != 0) + RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); + break; + } + spos++; + } + if (!end) + RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); + return blockSize; +} + +/* More a "target" block size */ +static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining) +{ + int const lastBlock = (remaining <= blockSize); + return lastBlock ? remaining : blockSize; +} + +static size_t determine_blockSize(ZSTD_sequenceFormat_e mode, + size_t blockSize, size_t remaining, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) +{ + DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); + if (mode == ZSTD_sf_noBlockDelimiters) + return blockSize_noDelimiter(blockSize, remaining); + { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); + FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); + if (explicitBlockSize > blockSize) + RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block"); + if (explicitBlockSize > remaining) + RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); + return explicitBlockSize; + } +} + /* Compress, block-by-block, all of the sequences given. 
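To make the delimiter convention concrete, here is a hedged illustration of the ZSTD_Sequence layout this code expects (field order offset/litLength/matchLength/rep as declared in zstd.h; the numbers are invented):

/* One block described by three externally provided sequences.
 * The final entry is the block delimiter: offset == 0 and matchLength == 0,
 * with litLength carrying the block's trailing literals. */
ZSTD_Sequence const oneBlock[] = {
    { 100, 7, 11, 0 },   /* offset=100, litLength=7, matchLength=11 */
    {  25, 2,  5, 0 },   /* offset=25,  litLength=2, matchLength=5  */
    {   0, 4,  0, 0 },   /* delimiter : 4 trailing literals          */
};
/* blockSize_explicitDelimiter() sums litLength+matchLength over these entries,
 * delimiter included: 7+11 + 2+5 + 4 = 29 bytes, which must not exceed the
 * regular block size nor the remaining source. */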
* * Returns the cumulative size of all compressed blocks (including their headers), @@ -6005,9 +6572,6 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, const void* src, size_t srcSize) { size_t cSize = 0; - U32 lastBlock; - size_t blockSize; - size_t compressedSeqsSize; size_t remaining = srcSize; ZSTD_sequencePosition seqPos = {0, 0, 0}; @@ -6027,22 +6591,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, } while (remaining) { + size_t compressedSeqsSize; size_t cBlockSize; size_t additionalByteAdjustment; - lastBlock = remaining <= cctx->blockSize; - blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize; + size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, + cctx->blockSize, remaining, + inSeqs, inSeqsSize, seqPos); + U32 const lastBlock = (blockSize == remaining); + FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); + assert(blockSize <= remaining); ZSTD_resetSeqStore(&cctx->seqStore); - DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize); + DEBUGLOG(5, "Working on new block. Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize); - additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize); + additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes); FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); blockSize -= additionalByteAdjustment; /* If blocks are too small, emit as a nocompress block */ - if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. We need to revisit and change this logic to be more consistent */ + if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); + DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); cSize += cBlockSize; ip += blockSize; op += cBlockSize; @@ -6051,6 +6622,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, continue; } + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, @@ -6059,11 +6631,11 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, cctx->bmi2); FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); - DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize); + DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); if (!cctx->isFirstBlock && ZSTD_maybeRLE(&cctx->seqStore) && - ZSTD_isRLE((BYTE const*)src, srcSize)) { + ZSTD_isRLE(ip, blockSize)) { /* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." 
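This loop is ultimately driven by the public ZSTD_compressSequences() entry point; a minimal hedged sketch of calling it with explicit delimiters (the wrapper and its parameter choices are illustrative):

#include <zstd.h>

/* Compress `src` using externally produced sequences; `seqs`/`nbSeqs` are
 * assumed to describe `src` exactly, with explicit block delimiters. */
static size_t compress_from_sequences(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const ZSTD_Sequence* seqs, size_t nbSeqs,
                                       const void* src, size_t srcSize)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);  /* optional sanity checking */
    return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
}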
* This is only an issue for zstd <= v1.4.3 @@ -6074,12 +6646,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, if (compressedSeqsSize == 0) { /* ZSTD_noCompressBlock writes the block header as well */ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize); + FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed"); + DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize); } else if (compressedSeqsSize == 1) { cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed"); - DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize); + FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed"); + DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize); } else { U32 cBlockHeader; /* Error checking and repcodes update */ @@ -6091,11 +6663,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); MEM_writeLE24(op, cBlockHeader); cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; - DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize); + DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); } cSize += cBlockSize; - DEBUGLOG(4, "cSize running total: %zu", cSize); if (lastBlock) { break; @@ -6106,12 +6677,15 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, dstCapacity -= cBlockSize; cctx->isFirstBlock = 0; } + DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); } + DEBUGLOG(4, "cSize final total: %zu", cSize); return cSize; } -size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity, +size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, const ZSTD_Sequence* inSeqs, size_t inSeqsSize, const void* src, size_t srcSize) { @@ -6121,7 +6695,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci size_t frameHeaderSize = 0; /* Transparent initialization stage, same as compressStream2() */ - DEBUGLOG(3, "ZSTD_compressSequences()"); + DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity); assert(cctx != NULL); FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); /* Begin writing output, starting with frame header */ @@ -6149,26 +6723,34 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci cSize += 4; } - DEBUGLOG(3, "Final compressed size: %zu", cSize); + DEBUGLOG(4, "Final compressed size: %zu", cSize); return cSize; } /*====== Finalize ======*/ +static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) +{ + const ZSTD_inBuffer nullInput = { NULL, 0, 0 }; + const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + return stableInput ? zcs->expectedInBuffer : nullInput; +} + /*! 
ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; + ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); + input.size = input.pos; /* do not ingest more input during flush */ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); } size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; + ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); - FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed"); + FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed"); if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ /* single thread mode : attempt to calculate remaining to flush more precisely */ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; @@ -6290,7 +6872,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, cp.targetLength = (unsigned)(-clampedCompressionLevel); } /* refine parameters based on srcSize & dictSize */ - return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode); + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto); } } @@ -6325,3 +6907,21 @@ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeH if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); } + +void ZSTD_registerSequenceProducer( + ZSTD_CCtx* zc, void* mState, + ZSTD_sequenceProducer_F* mFinder +) { + if (mFinder != NULL) { + ZSTD_externalMatchCtx emctx; + emctx.mState = mState; + emctx.mFinder = mFinder; + emctx.seqBuffer = NULL; + emctx.seqBufferCapacity = 0; + zc->externalMatchCtx = emctx; + zc->requestedParams.useSequenceProducer = 1; + } else { + ZSTD_memset(&zc->externalMatchCtx, 0, sizeof(zc->externalMatchCtx)); + zc->requestedParams.useSequenceProducer = 0; + } +} diff --git a/lib/compress/zstd_compress_internal.h b/lib/compress/zstd_compress_internal.h index c406e794bdb..cbb85e527eb 100644 --- a/lib/compress/zstd_compress_internal.h +++ b/lib/compress/zstd_compress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -23,6 +23,7 @@ #ifdef ZSTD_MULTITHREAD # include "zstdmt_compress.h" #endif +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */ #if defined (__cplusplus) extern "C" { @@ -117,12 +118,13 @@ typedef struct { /** ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. 
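ZSTD_registerSequenceProducer() above either records a producer or clears the registration when passed NULL. A hedged sketch of hooking one up, assuming the experimental declarations that accompany this change in zstd.h (ZSTD_sequenceProducer_F, ZSTD_SEQUENCE_PRODUCER_ERROR, ZSTD_c_enableSeqProducerFallback); the producer below is a deliberately trivial placeholder:

#include <zstd.h>

/* Placeholder producer: always reports failure, so compression only succeeds
 * if the internal-matchfinder fallback is enabled. */
static size_t dummyProducer(void* state,
                            ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                            const void* src, size_t srcSize,
                            const void* dict, size_t dictSize,
                            int compressionLevel, size_t windowSize)
{
    (void)state; (void)outSeqs; (void)outSeqsCapacity; (void)src; (void)srcSize;
    (void)dict; (void)dictSize; (void)compressionLevel; (void)windowSize;
    return ZSTD_SEQUENCE_PRODUCER_ERROR;
}

static void attach_producer(ZSTD_CCtx* cctx)
{
    ZSTD_registerSequenceProducer(cctx, NULL /* producer state */, dummyProducer);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
    /* Passing NULL as the function pointer instead clears the registration,
     * as the else branch above shows. */
}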
* @return : 0 on success or error code */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize); +size_t ZSTD_buildBlockEntropyStats( + const seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize); /********************************* * Compression internals structs * @@ -148,6 +150,12 @@ typedef struct { size_t capacity; /* The capacity starting from `seq` pointer */ } rawSeqStore_t; +typedef struct { + U32 idx; /* Index in array of ZSTD_Sequence */ + U32 posInSequence; /* Position within sequence at idx */ + size_t posInSrc; /* Number of bytes given by sequences provided so far */ +} ZSTD_sequencePosition; + UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; typedef struct { @@ -234,6 +242,11 @@ struct ZSTD_matchState_t { const ZSTD_matchState_t* dictMatchState; ZSTD_compressionParameters cParams; const rawSeqStore_t* ldmSeqStore; + + /* Controls prefetching in some dictMatchState matchfinders. + * This behavior is controlled from the cctx ms. + * This parameter has no effect in the cdict ms. */ + int prefetchCDictTables; }; typedef struct { @@ -330,6 +343,24 @@ struct ZSTD_CCtx_params_s { /* Internal use, for createCCtxParams() and freeCCtxParams() only */ ZSTD_customMem customMem; + + /* Controls prefetching in some dictMatchState matchfinders */ + ZSTD_paramSwitch_e prefetchCDictTables; + + /* Controls whether zstd will fall back to an internal matchfinder + * if the external matchfinder returns an error code. */ + int enableMatchFinderFallback; + + /* Indicates whether an external matchfinder has been referenced. + * Users can't set this externally. + * It is set internally in ZSTD_registerSequenceProducer(). */ + int useSequenceProducer; + + /* Adjust the max block size*/ + size_t maxBlockSize; + + /* Controls repcode search in external sequence parsing */ + ZSTD_paramSwitch_e searchForExternalRepcodes; }; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */ #define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2)) @@ -361,6 +392,14 @@ typedef struct { ZSTD_entropyCTablesMetadata_t entropyMetadata; } ZSTD_blockSplitCtx; +/* Context for block-level external matchfinder API */ +typedef struct { + void* mState; + ZSTD_sequenceProducer_F* mFinder; + ZSTD_Sequence* seqBuffer; + size_t seqBufferCapacity; +} ZSTD_externalMatchCtx; + struct ZSTD_CCtx_s { ZSTD_compressionStage_e stage; int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. 
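The new fields above (prefetchCDictTables, useSequenceProducer, maxBlockSize, searchForExternalRepcodes) are surfaced through the regular parameter API; a sketch under the assumption that the corresponding experimental parameter names in zstd.h are ZSTD_c_prefetchCDictTables, ZSTD_c_maxBlockSize and ZSTD_c_searchForExternalRepcodes (values chosen here are purely illustrative):

#include <zstd.h>

static void tune_new_params(ZSTD_CCtx* cctx)
{
    /* Let zstd decide whether to prefetch CDict tables in dictMatchState mode. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_prefetchCDictTables, ZSTD_ps_auto);
    /* Cap block size below the 128 KB default, e.g. for latency-sensitive streaming. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 64 * 1024);
    /* Ask the external-sequence path to look for repcodes while parsing. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchForExternalRepcodes, ZSTD_ps_enable);
}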
*/ @@ -410,6 +449,7 @@ struct ZSTD_CCtx_s { /* Stable in/out buffer verification */ ZSTD_inBuffer expectedInBuffer; + size_t stableIn_notConsumed; /* nb bytes within stable input buffer that are said to be consumed but are not */ size_t expectedOutBufferSize; /* Dictionary */ @@ -429,9 +469,13 @@ struct ZSTD_CCtx_s { /* Workspace for block splitter */ ZSTD_blockSplitCtx blockSplitCtx; + + /* Workspace for external matchfinder */ + ZSTD_externalMatchCtx externalMatchCtx; }; typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e; +typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e; typedef enum { ZSTD_noDict = 0, @@ -453,7 +497,7 @@ typedef enum { * In this mode we take both the source size and the dictionary size * into account when selecting and adjusting the parameters. */ - ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams. + ZSTD_cpm_unknown = 3 /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams. * We don't know what these parameters are for. We default to the legacy * behavior of taking both the source size and the dict size into account * when selecting and adjusting parameters. @@ -512,9 +556,11 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) /* ZSTD_noCompressBlock() : * Writes uncompressed block to dst buffer from given src. * Returns the size of the block */ -MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) +MEM_STATIC size_t +ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); + DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity); RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, dstSize_tooSmall, "dst buf too small for uncompressed block"); MEM_writeLE24(dst, cBlockHeader24); @@ -522,7 +568,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi return ZSTD_blockHeaderSize + srcSize; } -MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) +MEM_STATIC size_t +ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) { BYTE* const op = (BYTE*)dst; U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); @@ -541,7 +588,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) { U32 const minlog = (strat>=ZSTD_btultra) ? 
(U32)(strat) - 1 : 6; ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); return (srcSize >> minlog) + 2; } @@ -577,29 +624,27 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con while (ip < iend) *op++ = *ip++; } -#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) -#define STORE_REPCODE_1 STORE_REPCODE(1) -#define STORE_REPCODE_2 STORE_REPCODE(2) -#define STORE_REPCODE_3 STORE_REPCODE(3) -#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1) -#define STORE_OFFSET(o) (assert((o)>0), o + ZSTD_REP_MOVE) -#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE) -#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE) -#define STORED_OFFSET(o) (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE) -#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1) /* returns ID 1,2,3 */ -#define STORED_TO_OFFBASE(o) ((o)+1) -#define OFFBASE_TO_STORED(o) ((o)-1) + +#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1) +#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2) +#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3) +#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */ +#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM) +#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM) +#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) +#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) +#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ /*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t. - * @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET(). + * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). * @matchLength : must be >= MINMATCH - * Allowed to overread literals up to litLimit. + * Allowed to over-read literals up to litLimit. */ HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, - U32 offBase_minus1, + U32 offBase, size_t matchLength) { BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; @@ -608,8 +653,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, static const BYTE* g_start = NULL; if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); - DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", - pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1); + DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u", + pos, (U32)litLength, (U32)matchLength, (U32)offBase); } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); @@ -619,9 +664,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, assert(literals + litLength <= litLimit); if (litEnd <= litLimit_w) { /* Common case we can use wildcopy. - * First copy 16 bytes, because literals are likely short. - */ - assert(WILDCOPY_OVERLENGTH >= 16); + * First copy 16 bytes, because literals are likely short. 
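The OFFBASE macros above fold repcodes and real offsets into a single value space: repcodes keep their IDs 1..3, and a real offset is shifted up by ZSTD_REP_NUM. A small illustration relying only on the macros just defined:

/* Sketch: round-trips through the offbase encoding (ZSTD_REP_NUM == 3). */
static void offbase_illustration(void)
{
    assert(REPCODE_TO_OFFBASE(1) == 1);                         /* repcode 1 -> offbase 1 */
    assert(OFFSET_TO_OFFBASE(1)  == 1 + ZSTD_REP_NUM);          /* offset  1 -> offbase 4 */
    assert(OFFBASE_IS_REPCODE(3) && !OFFBASE_IS_OFFSET(3));
    assert(OFFBASE_TO_OFFSET(OFFSET_TO_OFFBASE(1000)) == 1000); /* lossless round-trip */
}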
+ */ + ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); @@ -640,7 +685,7 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, seqStorePtr->sequences[0].litLength = (U16)litLength; /* match offset */ - seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1); + seqStorePtr->sequences[0].offBase = offBase; /* match Length */ assert(matchLength >= MINMATCH); @@ -658,17 +703,17 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, /* ZSTD_updateRep() : * updates in-place @rep (array of repeat offsets) - * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq() + * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() */ MEM_STATIC void -ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) +ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) { - if (STORED_IS_OFFSET(offBase_minus1)) { /* full offset */ + if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */ rep[2] = rep[1]; rep[1] = rep[0]; - rep[0] = STORED_OFFSET(offBase_minus1); + rep[0] = OFFBASE_TO_OFFSET(offBase); } else { /* repcode */ - U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0; + U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; if (repCode > 0) { /* note : if repCode==0, no change */ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; rep[2] = (repCode >= 2) ? rep[1] : rep[2]; @@ -685,11 +730,11 @@ typedef struct repcodes_s { } repcodes_t; MEM_STATIC repcodes_t -ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) +ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) { repcodes_t newReps; ZSTD_memcpy(&newReps, rep, sizeof(newReps)); - ZSTD_updateRep(newReps.rep, offBase_minus1, ll0); + ZSTD_updateRep(newReps.rep, offBase, ll0); return newReps; } @@ -697,103 +742,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0 /*-************************************* * Match length counter ***************************************/ -static unsigned ZSTD_NbCommonBytes (size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 - return _tzcnt_u64(val) >> 3; -# else - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, (U64)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, - 0, 3, 1, 3, 1, 4, 2, 7, - 0, 2, 3, 6, 1, 5, 3, 5, - 1, 3, 4, 4, 2, 5, 6, 7, - 7, 0, 1, 2, 3, 3, 4, 6, - 2, 6, 5, 5, 3, 4, 5, 6, - 7, 1, 2, 4, 6, 4, 4, 5, - 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, - 3, 2, 2, 1, 3, 2, 0, 1, - 3, 3, 1, 2, 2, 2, 2, 0, - 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } - } else 
{ /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 - return _lzcnt_u64(val) >> 3; -# else - if (val != 0) { - unsigned long r; - _BitScanReverse64(&r, (U64)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, (unsigned long)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} - - MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) { const BYTE* const pStart = pIn; @@ -839,32 +787,36 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, * Hashes ***************************************/ static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; } +static U32 ZSTD_hash3(U32 u, U32 h) { assert(h <= 32); return ((u << (32-24)) * prime3bytes) >> (32-h) ; } MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } -static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } +static U32 ZSTD_hash4(U32 u, U32 h) { assert(h <= 32); return (u * prime4bytes) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h); } static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } +static size_t ZSTD_hash5(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } static const U64 prime7bytes = 58295818150454627ULL; -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } +static size_t ZSTD_hash7(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u) * prime8bytes) >> (64-h)) ; } static size_t 
ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) { + /* Although some of these hashes do support hBits up to 64, some do not. + * To be on the safe side, always avoid hBits > 32. */ + assert(hBits <= 32); + switch(mls) { default: @@ -1223,10 +1175,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); assert(blockEndIdx >= loadedDictEnd); - if (blockEndIdx > loadedDictEnd + maxDist) { + if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) { /* On reaching window size, dictionaries are invalidated. * For simplification, if window size is reached anywhere within next block, * the dictionary is invalidated for the full block. + * + * We also have to invalidate the dictionary if ZSTD_window_update() has detected + * non-contiguous segments, which means that loadedDictEnd != window->dictLimit. + * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use + * dictMatchState, so setting it to NULL is not a problem. */ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); *loadedDictEndPtr = 0; @@ -1358,6 +1315,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) #endif +/* Short Cache */ + +/* Normally, zstd matchfinders follow this flow: + * 1. Compute hash at ip + * 2. Load index from hashTable[hash] + * 3. Check if *ip == *(base + index) + * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss. + * + * Short cache is an optimization which allows us to avoid step 3 most of the time + * when the data doesn't actually match. With short cache, the flow becomes: + * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip. + * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works. + * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue. + * + * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to + * dictMatchState matchfinders. + */ +#define ZSTD_SHORT_CACHE_TAG_BITS 8 +#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1) + +/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable. + * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */ +MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) { + size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; + U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK); + assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0); + hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag; +} + +/* Helper function for short cache matchfinders. + * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */ +MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) { + U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK; + U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK; + return tag1 == tag2; +} #if defined (__cplusplus) } @@ -1455,4 +1448,31 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); */ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize); +/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of + * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. 
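To make the short-cache packing concrete: ZSTD_hashPtr() is called with hBits + ZSTD_SHORT_CACHE_TAG_BITS, the low 8 bits of the result act as the tag, and the table entry stores (index << 8) | tag. A short illustration using the helpers defined above (the bucket, tag and index values are invented):

static void short_cache_illustration(void)
{
    U32 hashTable[64] = { 0 };
    /* bucket 0x37, tag 0xAB : upper bits select the bucket, low 8 bits are the tag */
    size_t const hashAndTag = ((size_t)0x37 << ZSTD_SHORT_CACHE_TAG_BITS) | 0xAB;
    ZSTD_writeTaggedIndex(hashTable, hashAndTag, 0x00012345);   /* index must fit in 24 bits */
    /* hashTable[0x37] now holds (0x00012345 << 8) | 0xAB; a later probe that produced
     * the same tag passes the cheap filter before the expensive *(base + index) check: */
    assert(ZSTD_comparePackedTags(hashTable[0x37], hashAndTag));
}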
+ * Note that the block delimiter must include the last literals of the block. + */ +size_t +ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, + ZSTD_sequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); + +/* Returns the number of bytes to move the current read position back by. + * Only non-zero if we ended up splitting a sequence. + * Otherwise, it may return a ZSTD error if something went wrong. + * + * This function will attempt to scan through blockSize bytes + * represented by the sequences in @inSeqs, + * storing any (partial) sequences. + * + * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to + * avoid splitting a match, or to avoid splitting a match such that it would produce a match + * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. + */ +size_t +ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); + #endif /* ZSTD_COMPRESS_H */ diff --git a/lib/compress/zstd_compress_literals.c b/lib/compress/zstd_compress_literals.c index 52b0a8059ab..bfd4f11abe4 100644 --- a/lib/compress/zstd_compress_literals.c +++ b/lib/compress/zstd_compress_literals.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,11 +13,36 @@ ***************************************/ #include "zstd_compress_literals.h" + +/* ************************************************************** +* Debug Traces +****************************************************************/ +#if DEBUGLEVEL >= 2 + +static size_t showHexa(const void* src, size_t srcSize) +{ + const BYTE* const ip = (const BYTE*)src; + size_t u; + for (u=0; u<srcSize; u++) { + RAWLOG(5, " %02X", ip[u]); + } + RAWLOG(5, " \n"); + return srcSize; +} + +#endif + size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE*)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity); + RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, ""); switch(flSize) @@ -36,16 +61,30 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, } ZSTD_memcpy(ostart + flSize, src, srcSize); - DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); + DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); return srcSize + flSize; } +static int allBytesIdentical(const void* src, size_t srcSize) +{ + assert(srcSize >= 1); + assert(src != NULL); + { const BYTE b = ((const BYTE*)src)[0]; + size_t p; + for (p=1; p<srcSize; p++) { + if (((const BYTE*)src)[p] != b) return 0; + } + return 1; + } +} + size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE*)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); - (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + assert(dstCapacity >= 4); (void)dstCapacity; + assert(allBytesIdentical(src, srcSize)); switch(flSize) { @@ -63,28 +102,51 @@ } ostart[flSize] = *(const BYTE*)src; - DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1); + DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1); return flSize+1; } -size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst,
size_t dstCapacity, - const void* src, size_t srcSize, - void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2, - unsigned suspectUncompressible) +/* ZSTD_minLiteralsToCompress() : + * returns minimal amount of literals + * for literal compression to even be attempted. + * Minimum is made tighter as compression strategy increases. + */ +static size_t +ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) +{ + assert((int)strategy >= 0); + assert((int)strategy <= 9); + /* btultra2 : min 8 bytes; + * then 2x larger for each successive compression strategy + * max threshold 64 bytes */ + { int const shift = MIN(9-(int)strategy, 3); + size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift; + DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc); + return mintc; + } +} + +size_t ZSTD_compressLiterals ( + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, + int disableLiteralCompression, + int suspectUncompressible, + int bmi2) { - size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 singleStream = srcSize < 256; symbolEncodingType_e hType = set_compressed; size_t cLitSize; - DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)", - disableLiteralCompression, (U32)srcSize); + DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)", + disableLiteralCompression, (U32)srcSize, dstCapacity); + + DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize)); /* Prepare nextEntropy assuming reusing the existing table */ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); @@ -92,40 +154,51 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, if (disableLiteralCompression) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - /* small ? don't even attempt compression (speed opt) */ -# define COMPRESS_LITERALS_SIZE_MIN 63 - { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; - if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + /* if too small, don't even attempt compression (speed opt) */ + if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); { HUF_repeat repeat = prevHuf->repeatMode; - int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; + int const flags = 0 + | (bmi2 ? HUF_flags_bmi2 : 0) + | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0) + | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0) + | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0); + + typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int); + huf_compress_f huf_compress; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; - cLitSize = singleStream ? 
- HUF_compress1X_repeat( - ostart+lhSize, dstCapacity-lhSize, src, srcSize, - HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) : - HUF_compress4X_repeat( - ostart+lhSize, dstCapacity-lhSize, src, srcSize, - HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible); + huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat; + cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize, + src, srcSize, + HUF_SYMBOLVALUE_MAX, LitHufLog, + entropyWorkspace, entropyWorkspaceSize, + (HUF_CElt*)nextHuf->CTable, + &repeat, flags); + DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize); if (repeat != HUF_repeat_none) { /* reused the existing table */ - DEBUGLOG(5, "Reusing previous huffman table"); + DEBUGLOG(5, "reusing statistics from previous huffman block"); hType = set_repeat; } } - if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + { size_t const minGain = ZSTD_minGain(srcSize, strategy); + if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } } if (cLitSize==1) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } + /* A return value of 1 signals that the alphabet consists of a single symbol. + * However, in some rare circumstances, it could be the compressed size (a single byte). + * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`. + * (it's also necessary to not generate statistics). + * Therefore, in such a case, actively check that all bytes are identical. */ + if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } } if (hType == set_compressed) { /* using a newly constructed table */ @@ -136,16 +209,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ - { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); + { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); diff --git a/lib/compress/zstd_compress_literals.h b/lib/compress/zstd_compress_literals.h index 9775fb97cb7..b060c8ad218 100644 --- a/lib/compress/zstd_compress_literals.h +++ b/lib/compress/zstd_compress_literals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
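For the 3-byte (2-2-10-10) case above, the header packs the literals block type, the stream-count/size format, the regenerated size and the compressed size into 24 bits. A worked illustration with invented sizes (set_compressed is assumed to carry its usual numeric value of 2):

/* hType = set_compressed (2), 4 streams, 1000 regenerated bytes, 300 compressed bytes: */
U32 const lhcExample = 2 + (1u << 2) + (1000u << 4) + (300u << 14);
/* MEM_writeLE24(ostart, lhcExample) emits the three header bytes;
 * the Huffman stream(s) then start at ostart + 3 (== lhSize). */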
* * This source code is licensed under both the BSD-style license (found in the @@ -16,16 +16,24 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize); +/* ZSTD_compressRleLiteralsBlock() : + * Conditions : + * - All bytes in @src are identical + * - dstCapacity >= 4 */ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); -/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst, size_t dstCapacity, +/* ZSTD_compressLiterals(): + * @entropyWorkspace: must be aligned on 4-bytes boundaries + * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE + * @suspectUncompressible: sampling checks, to potentially skip huffman coding + */ +size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2, - unsigned suspectUncompressible); + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, int disableLiteralCompression, + int suspectUncompressible, + int bmi2); #endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/lib/compress/zstd_compress_sequences.c b/lib/compress/zstd_compress_sequences.c index f1e40af2ea0..8872d4d354a 100644 --- a/lib/compress/zstd_compress_sequences.c +++ b/lib/compress/zstd_compress_sequences.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -58,7 +58,7 @@ static unsigned ZSTD_useLowProbCount(size_t const nbSeq) { /* Heuristic: This should cover most blocks <= 16K and * start to fade out after 16K to about 32K depending on - * comprssibility. + * compressibility. */ return nbSeq >= 2048; } @@ -166,7 +166,7 @@ ZSTD_selectEncodingType( if (mostFrequent == nbSeq) { *repeatMode = FSE_repeat_none; if (isDefaultAllowed && nbSeq <= 2) { - /* Prefer set_basic over set_rle when there are 2 or less symbols, + /* Prefer set_basic over set_rle when there are 2 or fewer symbols, * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. * If basic encoding isn't possible, always choose RLE. */ diff --git a/lib/compress/zstd_compress_sequences.h b/lib/compress/zstd_compress_sequences.h index 7991364c2f7..4a3a05da948 100644 --- a/lib/compress/zstd_compress_sequences.h +++ b/lib/compress/zstd_compress_sequences.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstd_compress_superblock.c b/lib/compress/zstd_compress_superblock.c index 10e33785778..638c4acbe70 100644 --- a/lib/compress/zstd_compress_superblock.c +++ b/lib/compress/zstd_compress_superblock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -36,13 +36,14 @@ * If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block * and the following sub-blocks' literals sections will be Treeless_Literals_Block. 
* @return : compressed size of literals section of a sub-block - * Or 0 if it unable to compress. + * Or 0 if unable to compress. * Or error code */ -static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - const BYTE* literals, size_t litSize, - void* dst, size_t dstSize, - const int bmi2, int writeEntropy, int* entropyWritten) +static size_t +ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + const BYTE* literals, size_t litSize, + void* dst, size_t dstSize, + const int bmi2, int writeEntropy, int* entropyWritten) { size_t const header = writeEntropy ? 200 : 0; size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header)); @@ -53,8 +54,6 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; size_t cLitSize = 0; - (void)bmi2; /* TODO bmi2... */ - DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy); *entropyWritten = 0; @@ -76,9 +75,9 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize); } - /* TODO bmi2 */ - { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable) - : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable); + { int const flags = bmi2 ? HUF_flags_bmi2 : 0; + const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags) + : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags); op += cSize; cLitSize += cSize; if (cSize == 0 || ERR_isError(cSize)) { @@ -126,7 +125,11 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, return op-ostart; } -static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) { +static size_t +ZSTD_seqDecompressedSize(seqStore_t const* seqStore, + const seqDef* sequences, size_t nbSeq, + size_t litSize, int lastSequence) +{ const seqDef* const sstart = sequences; const seqDef* const send = sequences + nbSeq; const seqDef* sp = sstart; @@ -156,13 +159,14 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* * @return : compressed size of sequences section of a sub-block * Or 0 if it is unable to compress * Or error code. 
*/ -static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - const seqDef* sequences, size_t nbSeq, - const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - const int bmi2, int writeEntropy, int* entropyWritten) +static size_t +ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + const seqDef* sequences, size_t nbSeq, + const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + const int bmi2, int writeEntropy, int* entropyWritten) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; BYTE* const ostart = (BYTE*)dst; @@ -539,7 +543,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { - ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); + ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); } ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); } diff --git a/lib/compress/zstd_compress_superblock.h b/lib/compress/zstd_compress_superblock.h index 176f9b106f3..8e494f0d5e6 100644 --- a/lib/compress/zstd_compress_superblock.h +++ b/lib/compress/zstd_compress_superblock.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h index dc3f40c80c3..97676693b5e 100644 --- a/lib/compress/zstd_cwksp.h +++ b/lib/compress/zstd_cwksp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -348,7 +348,9 @@ ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase if (alloc) { alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - __asan_unpoison_memory_region(alloc, bytes); + /* We need to keep the redzone poisoned while unpoisoning the bytes that + * are actually allocated. */ + __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE); } } #endif @@ -499,7 +501,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { - ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd); + ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd)); } ZSTD_cwksp_mark_tables_clean(ws); } diff --git a/lib/compress/zstd_double_fast.c b/lib/compress/zstd_double_fast.c index 76933dea262..0ad88ffc7bd 100644 --- a/lib/compress/zstd_double_fast.c +++ b/lib/compress/zstd_double_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,8 +11,43 @@ #include "zstd_compress_internal.h" #include "zstd_double_fast.h" +static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashLarge = ms->hashTable; + U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + U32 const mls = cParams->minMatch; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + /* Always insert every fastHashFillStep position into the hash tables. + * Insert the other positions into the large hash table if their entry + * is empty. + */ + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { + U32 const curr = (U32)(ip - base); + U32 i; + for (i = 0; i < fastHashFillStep; ++i) { + size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); + size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) { + ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); + } + if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { + ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); + } + /* Only load extra positions for ZSTD_dtlm_full */ + if (dtlm == ZSTD_dtlm_fast) + break; + } } +} + +static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -43,7 +78,19 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, /* Only load extra positions for ZSTD_dtlm_full */ if (dtlm == ZSTD_dtlm_fast) break; - } } + } } +} + +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + if (tfp == ZSTD_tfp_forCDict) { + ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); + } else { + ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); + } } @@ -67,7 +114,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; + U32 offsetSaved1 = 0, offsetSaved2 = 0; size_t mLength; U32 offset; @@ -100,8 +147,8 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( U32 const current = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); U32 const maxRep = current - windowLow; - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; } /* Outer Loop: one iteration per match found and stored */ @@ -131,7 +178,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); goto _match_stored; } @@ -175,9 +222,13 
@@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( } while (ip1 <= ilimit); _cleanup: + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; + /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -217,7 +268,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( hashLong[hl1] = (U32)(ip1 - base); } - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); _match_stored: /* match found */ @@ -243,7 +294,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength); ip += rLength; anchor = ip; continue; /* faster when present ... (?) */ @@ -275,7 +326,6 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams; @@ -286,8 +336,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); - const U32 dictHBitsL = dictCParams->hashLog; - const U32 dictHBitsS = dictCParams->chainLog; + const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); @@ -295,6 +345,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( /* if a dictionary is attached, it must be within window range */ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); + if (ms->prefetchCDictTables) { + size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); + size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32); + PREFETCH_AREA(dictHashLong, hashTableBytes) + PREFETCH_AREA(dictHashSmall, chainTableBytes) + } + /* init */ ip += (dictAndPrefixLength == 0); @@ -309,8 +366,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( U32 offset; size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); - size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8); - size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls); + size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); + size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); + U32 const dictMatchIndexAndTagL = 
dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS]; + U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS]; + int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); + int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); U32 const curr = (U32)(ip-base); U32 const matchIndexL = hashLong[h2]; U32 matchIndexS = hashSmall[h]; @@ -328,7 +389,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); goto _match_stored; } @@ -340,9 +401,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ goto _match_found; } - } else { + } else if (dictTagsMatchL) { /* check dictMatchState long match */ - U32 const dictMatchIndexL = dictHashLong[dictHL]; + U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS; const BYTE* dictMatchL = dictBase + dictMatchIndexL; assert(dictMatchL < dictEnd); @@ -358,9 +419,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } - } else { + } else if (dictTagsMatchS) { /* check dictMatchState short match */ - U32 const dictMatchIndexS = dictHashSmall[dictHS]; + U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS; match = dictBase + dictMatchIndexS; matchIndexS = dictMatchIndexS + dictIndexDelta; @@ -375,10 +436,11 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( continue; _search_next_long: - { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8); + size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8); U32 const matchIndexL3 = hashLong[hl3]; + U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS]; + int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3); const BYTE* matchL3 = base + matchIndexL3; hashLong[hl3] = curr + 1; @@ -391,9 +453,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ goto _match_found; } - } else { + } else if (dictTagsMatchL3) { /* check dict long +1 match */ - U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; + U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { @@ -419,7 +481,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); _match_stored: /* match found */ @@ -448,7 +510,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* const repEnd2 = repIndex2 < 
prefixLowestIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; @@ -461,8 +523,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( } /* while (ip < ilimit) */ /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1; + rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -585,7 +647,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; @@ -596,7 +658,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); @@ -621,7 +683,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( } offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); } else { ip += ((ip-anchor) >> kSearchStrength) + 1; @@ -653,7 +715,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; diff --git a/lib/compress/zstd_double_fast.h b/lib/compress/zstd_double_fast.h index e16b7b03a32..6f0047c4ba7 100644 --- a/lib/compress/zstd_double_fast.h +++ b/lib/compress/zstd_double_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
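/* The STORE_OFFSET / STORE_REPCODE_1 -> OFFSET_TO_OFFBASE / REPCODE1_TO_OFFBASE renames
 * in the hunks above keep a single "offBase" number per stored sequence: values
 * 1..ZSTD_REP_NUM name a repcode, larger values carry a real offset shifted by
 * ZSTD_REP_NUM. The SKETCH_ macros below are a hedged reconstruction of that
 * convention, not copied from the patch; treat the exact constants as assumptions. */
#define SKETCH_REP_NUM 3
#define SKETCH_OFFSET_TO_OFFBASE(o)    ((o) + SKETCH_REP_NUM)   /* real offset, o > 0 */
#define SKETCH_REPCODE_TO_OFFBASE(r)   (r)                      /* repcode, 1 <= r <= 3 */
#define SKETCH_OFFBASE_IS_REPCODE(ob)  ((ob) <= SKETCH_REP_NUM)
#define SKETCH_OFFBASE_TO_OFFSET(ob)   ((ob) - SKETCH_REP_NUM)  /* only valid for non-repcodes */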
* * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,8 @@ extern "C" { #include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c index 802fc315798..5f2c6a2edad 100644 --- a/lib/compress/zstd_fast.c +++ b/lib/compress/zstd_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,8 +11,42 @@ #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_fast.h" +static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + U32 const mls = cParams->minMatch; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + /* Currently, we always use ZSTD_dtlm_full for filling CDict tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_full); + + /* Always insert every fastHashFillStep position into the hash table. + * Insert the other positions if their hash entry is empty. + */ + for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { + U32 const curr = (U32)(ip - base); + { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls); + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); } + + if (dtlm == ZSTD_dtlm_fast) continue; + /* Only load extra positions for ZSTD_dtlm_full */ + { U32 p; + for (p = 1; p < fastHashFillStep; ++p) { + size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */ + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); + } } } } +} + +static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { @@ -25,6 +59,10 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; + /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_fast); + /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. 
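/* Sketch of the "short cache" tagging used by the new ZSTD_fill*HashTableForCDict
 * paths: the hash is computed with hashLog + ZSTD_SHORT_CACHE_TAG_BITS bits, the low
 * bits act as a tag, and each table entry packs (index << TAG_BITS) | tag. On lookup,
 * ZSTD_comparePackedTags() can reject most candidates from the tag alone, without
 * dereferencing dictionary content. The helpers below are hypothetical stand-ins for
 * ZSTD_writeTaggedIndex() / ZSTD_comparePackedTags(); the tag width is an assumption,
 * and U32 is zstd's usual typedef from mem.h. */
#define SKETCH_TAG_BITS 5
#define SKETCH_TAG_MASK ((1u << SKETCH_TAG_BITS) - 1)

static void sketch_writeTaggedIndex(U32* table, size_t hashAndTag, U32 index)
{
    U32 const tag = (U32)(hashAndTag & SKETCH_TAG_MASK);
    table[hashAndTag >> SKETCH_TAG_BITS] = (index << SKETCH_TAG_BITS) | tag;
}

static int sketch_comparePackedTags(U32 packedEntry, size_t hashAndTag)
{
    return (packedEntry & SKETCH_TAG_MASK) == (U32)(hashAndTag & SKETCH_TAG_MASK);
}
/* and the stored index is recovered as: packedEntry >> SKETCH_TAG_BITS */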
*/ @@ -42,6 +80,18 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, } } } } } +void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + if (tfp == ZSTD_tfp_forCDict) { + ZSTD_fillHashTableForCDict(ms, end, dtlm); + } else { + ZSTD_fillHashTableForCCtx(ms, end, dtlm); + } +} + /** * If you squint hard enough (and ignore repcodes), the search operation at any @@ -117,7 +167,7 @@ ZSTD_compressBlock_fast_noDict_generic( U32 rep_offset1 = rep[0]; U32 rep_offset2 = rep[1]; - U32 offsetSaved = 0; + U32 offsetSaved1 = 0, offsetSaved2 = 0; size_t hash0; /* hash for ip0 */ size_t hash1; /* hash for ip1 */ @@ -141,8 +191,8 @@ ZSTD_compressBlock_fast_noDict_generic( { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; - if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0; - if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0; + if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0; + if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0; } /* start each op */ @@ -180,8 +230,14 @@ ZSTD_compressBlock_fast_noDict_generic( mLength = ip0[-1] == match0[-1]; ip0 -= mLength; match0 -= mLength; - offcode = STORE_REPCODE_1; + offcode = REPCODE1_TO_OFFBASE; mLength += 4; + + /* First write next hash table entry; we've already calculated it. + * This write is known to be safe because the ip1 is before the + * repcode (ip2). */ + hashTable[hash1] = (U32)(ip1 - base); + goto _match; } @@ -195,6 +251,12 @@ ZSTD_compressBlock_fast_noDict_generic( /* check match at ip[0] */ if (MEM_read32(ip0) == mval) { /* found a match! */ + + /* First write next hash table entry; we've already calculated it. + * This write is known to be safe because the ip1 == ip0 + 1, so + * we know we will resume searching after ip1 */ + hashTable[hash1] = (U32)(ip1 - base); + goto _offset; } @@ -224,6 +286,21 @@ ZSTD_compressBlock_fast_noDict_generic( /* check match at ip[0] */ if (MEM_read32(ip0) == mval) { /* found a match! */ + + /* first write next hash table entry; we've already calculated it */ + if (step <= 4) { + /* We need to avoid writing an index into the hash table >= the + * position at which we will pick up our searching after we've + * taken this match. + * + * The minimum possible match has length 4, so the earliest ip0 + * can be after we take this match will be the current ip0 + 4. + * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely + * write this position. + */ + hashTable[hash1] = (U32)(ip1 - base); + } + goto _offset; } @@ -254,9 +331,24 @@ ZSTD_compressBlock_fast_noDict_generic( * However, it seems to be a meaningful performance hit to try to search * them. So let's not. */ + /* When the repcodes are outside of the prefix, we set them to zero before the loop. + * When the offsets are still zero, we need to restore them after the block to have a correct + * repcode history. If only one offset was invalid, it is easy. The tricky case is when both + * offsets were invalid. We need to figure out which offset to refill with. + * - If both offsets are zero they are in the same order. + * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`. + * - If only one is zero, we need to decide which offset to restore. + * - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1. + * - It is impossible for rep_offset2 to be non-zero. 
+ * + * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then + * set rep[0] = rep_offset1 and rep[1] = offsetSaved1. + */ + offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2; + /* save reps for next block */ - rep[0] = rep_offset1 ? rep_offset1 : offsetSaved; - rep[1] = rep_offset2 ? rep_offset2 : offsetSaved; + rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1; + rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -267,7 +359,7 @@ ZSTD_compressBlock_fast_noDict_generic( match0 = base + idx; rep_offset2 = rep_offset1; rep_offset1 = (U32)(ip0-match0); - offcode = STORE_OFFSET(rep_offset1); + offcode = OFFSET_TO_OFFBASE(rep_offset1); mLength = 4; /* Count the backwards match length. */ @@ -287,11 +379,6 @@ ZSTD_compressBlock_fast_noDict_generic( ip0 += mLength; anchor = ip0; - /* write next hash table entry */ - if (ip1 < ip0) { - hashTable[hash1] = (U32)(ip1 - base); - } - /* Fill table and check for immediate repcode. */ if (ip0 <= ilimit) { /* Fill Table */ @@ -306,7 +393,7 @@ ZSTD_compressBlock_fast_noDict_generic( { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; - ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength); + ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength); anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) */ } } } @@ -380,14 +467,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; + const BYTE* ip0 = istart; + const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */ const BYTE* anchor = istart; const U32 prefixStartIndex = ms->window.dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; @@ -397,13 +484,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); - const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); - const U32 dictHLog = dictCParams->hashLog; + const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart); + const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; /* if a dictionary is still attached, it necessarily means that * it is within window size. So we just check it. 
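/* A compact restatement of the saved-offset bookkeeping described in the comment block
 * above, written as a hypothetical helper (the patch inlines this logic in each block
 * compressor rather than calling a function like this): */
static void sketch_saveReps(U32 rep[2], U32 off1, U32 off2, U32 saved1, U32 saved2)
{
    /* If off1 was zeroed at block start (saved1 != 0) but became valid again
     * (off1 != 0), the history has rotated, so the old off1 belongs in rep[1]. */
    if ((saved1 != 0) && (off1 != 0)) saved2 = saved1;
    rep[0] = off1 ? off1 : saved1;
    rep[1] = off2 ? off2 : saved2;
}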
*/ const U32 maxDistance = 1U << cParams->windowLog; - const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ @@ -413,106 +500,155 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); + if (ms->prefetchCDictTables) { + size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); + PREFETCH_AREA(dictHashTable, hashTableBytes) + } + /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); - ip += (dictAndPrefixLength == 0); + ip0 += (dictAndPrefixLength == 0); /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + /* Outer search loop */ + assert(stepSize >= 1); + while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */ size_t mLength; - size_t const h = ZSTD_hashPtr(ip, hlog, mls); - U32 const curr = (U32)(ip-base); - U32 const matchIndex = hashTable[h]; - const BYTE* match = base + matchIndex; - const U32 repIndex = curr + 1 - offset_1; - const BYTE* repMatch = (repIndex < prefixStartIndex) ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - hashTable[h] = curr; /* update hash table */ - - if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); - } else if ( (matchIndex <= prefixStartIndex) ) { - size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); - U32 const dictMatchIndex = dictHashTable[dictHash]; - const BYTE* dictMatch = dictBase + dictMatchIndex; - if (dictMatchIndex <= dictStartIndex || - MEM_read32(dictMatch) != MEM_read32(ip)) { - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; - } else { - /* found a dict match */ - U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta); - mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; - while (((ip>anchor) & (dictMatch>dictStart)) - && (ip[-1] == dictMatch[-1])) { - ip--; dictMatch--; mLength++; + size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls); + + size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); + U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS]; + int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); + + U32 matchIndex = hashTable[hash0]; + U32 curr = (U32)(ip0 - base); + size_t step = stepSize; + const size_t kStepIncr = 1 << kSearchStrength; + const BYTE* nextStep = ip0 + kStepIncr; + + /* Inner search loop */ + while (1) { + const BYTE* match = base + matchIndex; + const U32 repIndex = curr + 1 - offset_1; + const BYTE* repMatch = (repIndex < prefixStartIndex) ? 
+ dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls); + size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); + hashTable[hash0] = curr; /* update hash table */ + + if (((U32) ((prefixStartIndex - 1) - repIndex) >= + 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ + && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) { + const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + ip0++; + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); + break; + } + + if (dictTagsMatch) { + /* Found a possible dict match */ + const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; + const BYTE* dictMatch = dictBase + dictMatchIndex; + if (dictMatchIndex > dictStartIndex && + MEM_read32(dictMatch) == MEM_read32(ip0)) { + /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */ + if (matchIndex <= prefixStartIndex) { + U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta); + mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4; + while (((ip0 > anchor) & (dictMatch > dictStart)) + && (ip0[-1] == dictMatch[-1])) { + ip0--; + dictMatch--; + mLength++; + } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + break; + } + } + } + + if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) { + /* found a regular match */ + U32 const offset = (U32) (ip0 - match); + mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; + while (((ip0 > anchor) & (match > prefixStart)) + && (ip0[-1] == match[-1])) { + ip0--; + match--; + mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + break; } - } else if (MEM_read32(match) != MEM_read32(ip)) { - /* it's not a match, and we're not going to check the dictionary */ - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; - } else { - /* found a regular match */ - U32 const offset = (U32)(ip-match); - mLength = ZSTD_count(ip+4, match+4, iend) + 4; - while (((ip>anchor) & (match>prefixStart)) - && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); - } + + /* Prepare for next iteration */ + dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS]; + dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); + matchIndex = hashTable[hash1]; + + if (ip1 >= nextStep) { + step++; + nextStep += kStepIncr; + } + ip0 = ip1; + ip1 = ip1 + step; + if (ip1 > ilimit) goto _cleanup; + + curr = (U32)(ip0 - base); + hash0 = hash1; + } /* end inner search loop */ /* match found */ - ip += mLength; - anchor = ip; + assert(mLength); + ip0 += mLength; + anchor = ip0; - if (ip <= ilimit) { + if (ip0 <= ilimit) { /* Fill Table */ assert(base+curr+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 
could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); + while (ip0 <= ilimit) { + U32 const current2 = (U32)(ip0-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : base + repIndex2; if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + && (MEM_read32(repMatch2) == MEM_read32(ip0))) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); - hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; - ip += repLength2; - anchor = ip; + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; + ip0 += repLength2; + anchor = ip0; continue; } break; } } + + /* Prepare for next iteration */ + assert(ip0 == anchor); + ip1 = ip0 + stepSize; } +_cleanup: /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1; + rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -553,11 +689,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ - U32 const stepSize = cParams->targetLength + !(cParams->targetLength); + size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); @@ -570,6 +705,28 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; + + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* ip2; + const BYTE* ip3; + U32 current0; + + + size_t hash0; /* hash for ip0 */ + size_t hash1; /* hash for ip1 */ + U32 idx; /* match idx for ip0 */ + const BYTE* idxBase; /* base pointer for idx */ + + U32 offcode; + const BYTE* match0; + size_t mLength; + const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */ + + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); (void)hasStep; /* not currently specialized on whether it's accelerated */ @@ -579,75 +736,202 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t h = ZSTD_hashPtr(ip, hlog, mls); - const U32 matchIndex = hashTable[h]; - const 
BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; - const BYTE* match = matchBase + matchIndex; - const U32 curr = (U32)(ip-base); - const U32 repIndex = curr + 1 - offset_1; - const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - hashTable[h] = curr; /* update hash table */ - DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr); - - if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ - & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength); - ip += rLength; - anchor = ip; - } else { - if ( (matchIndex < dictStartIndex) || - (MEM_read32(match) != MEM_read32(ip)) ) { - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; + { U32 const curr = (U32)(ip0 - base); + U32 const maxRep = curr - dictStartIndex; + if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0; + } + + /* start each op */ +_start: /* Requires: ip0 */ + + step = stepSize; + nextStep = ip0 + kStepIncr; + + /* calculate positions, ip0 - anchor == 0, so we skip step calc */ + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + + if (ip3 >= ilimit) { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + + idx = hashTable[hash0]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + do { + { /* load repcode match for ip[2] */ + U32 const current2 = (U32)(ip2 - base); + U32 const repIndex = current2 - offset_1; + const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; + U32 rval; + if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */ + & (offset_1 > 0) ) { + rval = MEM_read32(repBase + repIndex); + } else { + rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */ } - { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; - const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; - U32 const offset = curr - matchIndex; - size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; - while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = offset; /* update offset history */ - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); - ip += mLength; - anchor = ip; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* check repcode at ip[2] */ + if (MEM_read32(ip2) == rval) { + ip0 = ip2; + match0 = repBase + repIndex; + matchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; + assert((match0 != prefixStart) & (match0 != dictStart)); + mLength = ip0[-1] == match0[-1]; + ip0 -= mLength; + match0 -= mLength; + offcode = REPCODE1_TO_OFFBASE; + mLength += 4; + goto _match; } } - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; - hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */ - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; - { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2); - hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } } + { /* load match for ip[0] */ + U32 const mval = idx >= dictStartIndex ? + MEM_read32(idxBase + idx) : + MEM_read32(ip0) ^ 1; /* guaranteed not to match */ + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } } + + /* lookup ip[1] */ + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + { /* load match for ip[0] */ + U32 const mval = idx >= dictStartIndex ? + MEM_read32(idxBase + idx) : + MEM_read32(ip0) ^ 1; /* guaranteed not to match */ + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } } + + /* lookup ip[1] */ + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + + /* calculate step */ + if (ip2 >= nextStep) { + step++; + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + nextStep += kStepIncr; + } + } while (ip3 < ilimit); + +_cleanup: + /* Note that there are probably still a couple positions we could search. + * However, it seems to be a meaningful performance hit to try to search + * them. So let's not. */ + + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; /* save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); + +_offset: /* Requires: ip0, idx, idxBase */ + + /* Compute the offset code. */ + { U32 const offset = current0 - idx; + const BYTE* const lowMatchPtr = idx < prefixStartIndex ? 
dictStart : prefixStart; + matchEnd = idx < prefixStartIndex ? dictEnd : iend; + match0 = idxBase + idx; + offset_2 = offset_1; + offset_1 = offset; + offcode = OFFSET_TO_OFFBASE(offset); + mLength = 4; + + /* Count the backwards match length. */ + while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) { + ip0--; + match0--; + mLength++; + } } + +_match: /* Requires: ip0, match0, offcode, matchEnd */ + + /* Count the forward length. */ + assert(matchEnd != 0); + mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart); + + ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); + + ip0 += mLength; + anchor = ip0; + + /* write next hash table entry */ + if (ip1 < ip0) { + hashTable[hash1] = (U32)(ip1 - base); + } + + /* Fill table and check for immediate repcode. */ + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + while (ip0 <= ilimit) { + U32 const repIndex2 = (U32)(ip0-base) - offset_2; + const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); + ip0 += repLength2; + anchor = ip0; + continue; + } + break; + } } + + goto _start; } ZSTD_GEN_FAST_FN(extDict, 4, 0) @@ -660,6 +944,7 @@ size_t ZSTD_compressBlock_fast_extDict( void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; + assert(ms->dictMatchState == NULL); switch(mls) { default: /* includes case 3 */ diff --git a/lib/compress/zstd_fast.h b/lib/compress/zstd_fast.h index 0d4a0c1090f..9e4236b4728 100644 --- a/lib/compress/zstd_fast.h +++ b/lib/compress/zstd_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,8 @@ extern "C" { #include "zstd_compress_internal.h" void ZSTD_fillHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); diff --git a/lib/compress/zstd_lazy.c b/lib/compress/zstd_lazy.c index 2e38dcb46d2..a2473427299 100644 --- a/lib/compress/zstd_lazy.c +++ b/lib/compress/zstd_lazy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -10,6 +10,7 @@ #include "zstd_compress_internal.h" #include "zstd_lazy.h" +#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */ /*-************************************* @@ -197,8 +198,8 @@ ZSTD_DUBT_findBetterDictMatch ( U32 matchIndex = dictMatchIndex + dictIndexDelta; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", - curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex); - bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex); + curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex); + bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); } if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ break; /* drop, to guarantee consistency (miss a little bit of compression) */ @@ -218,7 +219,7 @@ ZSTD_DUBT_findBetterDictMatch ( } if (bestLength >= MINMATCH) { - U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex; + U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } @@ -230,7 +231,7 @@ ZSTD_DUBT_findBetterDictMatch ( static size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, - size_t* offsetPtr, + size_t* offBasePtr, U32 const mls, const ZSTD_dictMode_e dictMode) { @@ -327,8 +328,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, if (matchLength > bestLength) { if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) - bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex); + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) ) + bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ if (dictMode == ZSTD_dictMatchState) { nbCompares = 0; /* in addition to avoiding checking any @@ -361,16 +362,16 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, - offsetPtr, bestLength, nbCompares, + offBasePtr, bestLength, nbCompares, mls, dictMode); } assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ if (bestLength >= MINMATCH) { - U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex; + U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", - curr, (U32)bestLength, (U32)*offsetPtr, mIndex); + curr, (U32)bestLength, (U32)*offBasePtr, mIndex); } return bestLength; } @@ -381,14 +382,14 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, 
FORCE_INLINE_TEMPLATE size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, + size_t* offBasePtr, const U32 mls /* template */, const ZSTD_dictMode_e dictMode) { DEBUGLOG(7, "ZSTD_BtFindBestMatch"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateDUBT(ms, ip, iLimit, mls); - return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); + return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); } /*********************************** @@ -561,7 +562,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); if (ip+currentMl == iLimit) { /* best possible, avoids read overflow on next attempt */ return ml; @@ -598,7 +599,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } @@ -691,7 +692,8 @@ size_t ZSTD_HcFindBestMatch( if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - if (match[ml] == ip[ml]) /* potentially better */ + /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; @@ -703,7 +705,7 @@ size_t ZSTD_HcFindBestMatch( /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - matchIndex); + *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -739,7 +741,7 @@ size_t ZSTD_HcFindBestMatch( if (currentMl > ml) { ml = currentMl; assert(curr > matchIndex + dmsIndexDelta); - *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -757,7 +759,6 @@ size_t ZSTD_HcFindBestMatch( ***********************************/ /* Constants for row-based hash */ #define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */ -#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) #define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ @@ -769,38 +770,8 @@ typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 repr * Starting from the LSB, returns the idx of the next non-zero bit. * Basically counting the nb of trailing zeroes. 
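/* The hand-rolled bit scan removed just below is superseded by
 * ZSTD_countTrailingZeros64() from ../common/bits.h (included above). A minimal
 * portable equivalent, assuming val != 0, looks roughly like this; the real helper
 * also covers MSVC intrinsics and 32-bit targets: */
static unsigned sketch_countTrailingZeros64(U64 val)
{
#if defined(__GNUC__) || defined(__clang__)
    return (unsigned)__builtin_ctzll(val);
#else
    unsigned n = 0;                              /* software fallback */
    while ((val & 1) == 0) { val >>= 1; n++; }
    return n;
#endif
}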
*/ -static U32 ZSTD_VecMask_next(ZSTD_VecMask val) { - assert(val != 0); -# if defined(_MSC_VER) && defined(_WIN64) - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, val); - return (U32)(r); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)))) - if (sizeof(size_t) == 4) { - U32 mostSignificantWord = (U32)(val >> 32); - U32 leastSignificantWord = (U32)val; - if (leastSignificantWord == 0) { - return 32 + (U32)__builtin_ctz(mostSignificantWord); - } else { - return (U32)__builtin_ctz(leastSignificantWord); - } - } else { - return (U32)__builtin_ctzll(val); - } -# else - /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count - * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer - */ - val = ~val & (val - 1ULL); /* Lowest set bit mask */ - val = val - ((val >> 1) & 0x5555555555555555); - val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); - return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56); -# endif +MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) { + return ZSTD_countTrailingZeros64(val); } /* ZSTD_rotateRight_*(): @@ -980,7 +951,35 @@ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog); - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */); + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */); +} + +/* Returns the mask width of bits group of which will be set to 1. Given not all + * architectures have easy movemask instruction, this helps to iterate over + * groups of bits easier and faster. + */ +FORCE_INLINE_TEMPLATE U32 +ZSTD_row_matchMaskGroupWidth(const U32 rowEntries) +{ + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + (void)rowEntries; +#if defined(ZSTD_ARCH_ARM_NEON) + /* NEON path only works for little endian */ + if (!MEM_isLittleEndian()) { + return 1; + } + if (rowEntries == 16) { + return 4; + } + if (rowEntries == 32) { + return 2; + } + if (rowEntries == 64) { + return 1; + } +#endif + return 1; } #if defined(ZSTD_ARCH_X86_SSE2) @@ -1003,71 +1002,82 @@ ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U } #endif -/* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches - * the hash at the nth position in a row of the tagTable. - * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield - * to match up with the actual layout of the entries within the hashTable */ +#if defined(ZSTD_ARCH_ARM_NEON) FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) +ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped) +{ + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + if (rowEntries == 16) { + /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. + * After that groups of 4 bits represent the equalMask. We lower + * all bits except the highest in these groups by doing AND with + * 0x88 = 0b10001000. 
+ */ + const uint8x16_t chunk = vld1q_u8(src); + const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); + const uint8x8_t res = vshrn_n_u16(equalMask, 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0); + return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull; + } else if (rowEntries == 32) { + /* Same idea as with rowEntries == 16 but doing AND with + * 0x55 = 0b01010101. + */ + const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src); + const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); + const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6); + const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6); + const uint8x8_t res = vsli_n_u8(t0, t1, 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ; + return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull; + } else { /* rowEntries == 64 */ + const uint8x16x4_t chunk = vld4q_u8(src); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); + const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); + const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); + const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); + + const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); + const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); + const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); + const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); + const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); + return ZSTD_rotateRight_U64(matches, headGrouped); + } +} +#endif + +/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by + * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" + * matches the hash at the nth position in a row of the tagTable. + * Each row is a circular buffer beginning at the value of "headGrouped". 
So we + * must rotate the "matches" bitfield to match up with the actual layout of the + * entries within the hashTable */ +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries) { const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET; assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8); #if defined(ZSTD_ARCH_X86_SSE2) - return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head); + return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped); #else /* SW or NEON-LE */ # if defined(ZSTD_ARCH_ARM_NEON) /* This NEON path only works for little endian - otherwise use SWAR below */ if (MEM_isLittleEndian()) { - if (rowEntries == 16) { - const uint8x16_t chunk = vld1q_u8(src); - const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); - const uint16x8_t t0 = vshlq_n_u16(equalMask, 7); - const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14)); - const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14)); - const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28)); - const U16 hi = (U16)vgetq_lane_u8(t3, 8); - const U16 lo = (U16)vgetq_lane_u8(t3, 0); - return ZSTD_rotateRight_U16((hi << 8) | lo, head); - } else if (rowEntries == 32) { - const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src); - const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); - const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); - const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag)); - const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag)); - const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0)); - const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1)); - const uint8x8_t t0 = vreinterpret_u8_s8(pack0); - const uint8x8_t t1 = vreinterpret_u8_s8(pack1); - const uint8x8_t t2 = vsri_n_u8(t1, t0, 2); - const uint8x8x2_t t3 = vuzp_u8(t2, t0); - const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4); - const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0); - return ZSTD_rotateRight_U32(matches, head); - } else { /* rowEntries == 64 */ - const uint8x16x4_t chunk = vld4q_u8(src); - const uint8x16_t dup = vdupq_n_u8(tag); - const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); - const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); - const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); - const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); - - const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); - const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); - const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); - const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); - const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); - const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); - return ZSTD_rotateRight_U64(matches, head); - } + return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped); } # endif /* ZSTD_ARCH_ARM_NEON */ /* SWAR */ - { const size_t chunkSize = sizeof(size_t); + { const int chunkSize = sizeof(size_t); const size_t shiftAmount = ((chunkSize * 8) - chunkSize); const size_t xFF = ~((size_t)0); const size_t x01 = xFF / 0xFF; @@ -1100,11 +1110,11 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, } matches = ~matches; if (rowEntries == 16) { - return ZSTD_rotateRight_U16((U16)matches, head); + return 
ZSTD_rotateRight_U16((U16)matches, headGrouped); } else if (rowEntries == 32) { - return ZSTD_rotateRight_U32((U32)matches, head); + return ZSTD_rotateRight_U32((U32)matches, headGrouped); } else { - return ZSTD_rotateRight_U64((U64)matches, head); + return ZSTD_rotateRight_U64((U64)matches, headGrouped); } } #endif @@ -1152,6 +1162,7 @@ size_t ZSTD_RowFindBestMatch( const U32 rowEntries = (1U << rowLog); const U32 rowMask = rowEntries - 1; const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */ + const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); U32 nbAttempts = 1U << cappedSearchLog; size_t ml=4-1; @@ -1194,15 +1205,15 @@ size_t ZSTD_RowFindBestMatch( U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK; U32* const row = hashTable + relRow; BYTE* tagRow = (BYTE*)(tagTable + relRow); - U32 const head = *tagRow & rowMask; + U32 const headGrouped = (*tagRow & rowMask) * groupWidth; U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; - ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries); + ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries); /* Cycle through the matches and prefetch */ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) { - U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask; + U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; U32 const matchIndex = row[matchPos]; assert(numMatches < rowEntries); if (matchIndex < lowLimit) @@ -1233,7 +1244,8 @@ size_t ZSTD_RowFindBestMatch( if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - if (match[ml] == ip[ml]) /* potentially better */ + /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; @@ -1245,7 +1257,7 @@ size_t ZSTD_RowFindBestMatch( /* Save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - matchIndex); + *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } @@ -1263,14 +1275,14 @@ size_t ZSTD_RowFindBestMatch( const U32 dmsSize = (U32)(dmsEnd - dmsBase); const U32 dmsIndexDelta = dictLimit - dmsSize; - { U32 const head = *dmsTagRow & rowMask; + { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth; U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; - ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries); + ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries); for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) { - U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask; + U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; U32 const matchIndex = dmsRow[matchPos]; if (matchIndex < dmsLowestIndex) break; @@ -1294,7 +1306,7 @@ size_t ZSTD_RowFindBestMatch( if (currentMl > ml) { ml = currentMl; assert(curr > matchIndex + dmsIndexDelta); - *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta)); + *offsetPtr = 
OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; } } @@ -1304,14 +1316,10 @@ size_t ZSTD_RowFindBestMatch( } -typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - /** - * This struct contains the functions necessary for lazy to search. - * Currently, that is only searchMax. However, it is still valuable to have the - * VTable because this makes it easier to add more functions to the VTable later. + * Generate search functions templated on (dictMode, mls, rowLog). + * These functions are outlined for code size & compilation time. + * ZSTD_searchMax() dispatches to the correct implementation function. * * TODO: The start of the search function involves loading and calculating a * bunch of constants from the ZSTD_matchState_t. These computations could be @@ -1329,25 +1337,25 @@ typedef size_t (*searchMax_f)( * the single segment loop. It should go in searchMax instead of its own * function to avoid having multiple virtual function calls per search. */ -typedef struct { - searchMax_f searchMax; -} ZSTD_LazyVTable; -#define GEN_ZSTD_BT_VTABLE(dictMode, mls) \ - static size_t ZSTD_BtFindBestMatch_##dictMode##_##mls( \ - ZSTD_matchState_t* ms, \ - const BYTE* ip, const BYTE* const iLimit, \ - size_t* offsetPtr) \ - { \ - assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ - } \ - static const ZSTD_LazyVTable ZSTD_BtVTable_##dictMode##_##mls = { \ - ZSTD_BtFindBestMatch_##dictMode##_##mls \ - }; +#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls +#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls +#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog + +#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE + +#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offBasePtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \ + } \ -#define GEN_ZSTD_HC_VTABLE(dictMode, mls) \ - static size_t ZSTD_HcFindBestMatch_##dictMode##_##mls( \ +#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \ ZSTD_matchState_t* ms, \ const BYTE* ip, const BYTE* const iLimit, \ size_t* offsetPtr) \ @@ -1355,12 +1363,9 @@ typedef struct { assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ } \ - static const ZSTD_LazyVTable ZSTD_HcVTable_##dictMode##_##mls = { \ - ZSTD_HcFindBestMatch_##dictMode##_##mls \ - }; -#define GEN_ZSTD_ROW_VTABLE(dictMode, mls, rowLog) \ - static size_t ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog( \ +#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \ ZSTD_matchState_t* ms, \ const BYTE* ip, const BYTE* const iLimit, \ size_t* offsetPtr) \ @@ -1369,9 +1374,6 @@ typedef struct { assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ } \ - static const ZSTD_LazyVTable ZSTD_RowVTable_##dictMode##_##mls##_##rowLog = { \ - 
ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog \ - }; #define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ X(dictMode, mls, 4) \ @@ -1394,84 +1396,103 @@ typedef struct { X(__VA_ARGS__, dictMatchState) \ X(__VA_ARGS__, dedicatedDictSearch) -/* Generate Row VTables for each combination of (dictMode, mls, rowLog) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_VTABLE) -/* Generate Binary Tree VTables for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_VTABLE) -/* Generate Hash Chain VTables for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE) - -#define GEN_ZSTD_BT_VTABLE_ARRAY(dictMode) \ - { \ - &ZSTD_BtVTable_##dictMode##_4, \ - &ZSTD_BtVTable_##dictMode##_5, \ - &ZSTD_BtVTable_##dictMode##_6 \ - } - -#define GEN_ZSTD_HC_VTABLE_ARRAY(dictMode) \ - { \ - &ZSTD_HcVTable_##dictMode##_4, \ - &ZSTD_HcVTable_##dictMode##_5, \ - &ZSTD_HcVTable_##dictMode##_6 \ - } +/* Generate row search fns for each combination of (dictMode, mls, rowLog) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN) +/* Generate binary Tree search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN) +/* Generate hash chain search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN) -#define GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, mls) \ - { \ - &ZSTD_RowVTable_##dictMode##_##mls##_4, \ - &ZSTD_RowVTable_##dictMode##_##mls##_5, \ - &ZSTD_RowVTable_##dictMode##_##mls##_6 \ - } - -#define GEN_ZSTD_ROW_VTABLE_ARRAY(dictMode) \ - { \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 4), \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 5), \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 6) \ - } +typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; -#define GEN_ZSTD_VTABLE_ARRAY(X) \ - { \ - X(noDict), \ - X(extDict), \ - X(dictMatchState), \ - X(dedicatedDictSearch) \ +#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + case rowLog: \ + return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr); + +#define ZSTD_SWITCH_MLS(X, dictMode) \ + switch (mls) { \ + ZSTD_FOR_EACH_MLS(X, dictMode) \ } -/* ******************************* -* Common parser - lazy strategy -*********************************/ -typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; +#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \ + case mls: \ + switch (rowLog) { \ + ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \ + } \ + ZSTD_UNREACHABLE; \ + break; + +#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \ + switch (searchMethod) { \ + case search_hashChain: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \ + break; \ + case search_binaryTree: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \ + break; \ + case search_rowHash: \ + ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \ + break; \ + } \ + ZSTD_UNREACHABLE; /** - * This table is indexed first by the four ZSTD_dictMode_e values, and then - * by the two searchMethod_e values. 
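/* [Editor's note, not part of the patch] GEN_ZSTD_BT/HC/ROW_SEARCH_FN and the
 * ZSTD_FOR_EACH_* helpers above stamp out one outlined function per template
 * parameter combination. A toy version of that pattern, with invented names,
 * is sketched below for illustration only; each generated function bakes its
 * `mls` in as a compile-time constant. */
#define GEN_SEARCH_FN(mls)                                          \
    static int search_mls##mls(const char* ip, const char* iend)    \
    {   int hits = 0;                                               \
        while (ip + mls <= iend) { ip += mls; hits++; }             \
        return hits;                                                \
    }

#define FOR_EACH_MLS(X) X(4) X(5) X(6)

FOR_EACH_MLS(GEN_SEARCH_FN)  /* defines search_mls4, search_mls5 and search_mls6 */

/* trivial caller, just to show the generated names in use */
static int searchAll(const char* p, const char* e)
{ return search_mls4(p, e) + search_mls5(p, e) + search_mls6(p, e); }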
NULLs are placed for configurations - * that should never occur (extDict modes go to the other implementation - * below and there is no DDSS for binary tree search yet). + * Searches for the longest match at @p ip. + * Dispatches to the correct implementation function based on the + * (searchMethod, dictMode, mls, rowLog). We use switch statements + * here instead of using an indirect function call through a function + * pointer because after Spectre and Meltdown mitigations, indirect + * function calls can be very costly, especially in the kernel. + * + * NOTE: dictMode and searchMethod should be templated, so those switch + * statements should be optimized out. Only the mls & rowLog switches + * should be left. + * + * @param ms The match state. + * @param ip The position to search at. + * @param iend The end of the input data. + * @param[out] offsetPtr Stores the match offset into this pointer. + * @param mls The minimum search length, in the range [4, 6]. + * @param rowLog The row log (if applicable), in the range [4, 6]. + * @param searchMethod The search method to use (templated). + * @param dictMode The dictMode (templated). + * + * @returns The length of the longest match found, or < mls if no match is found. + * If a match is found its offset is stored in @p offsetPtr. */ - -static ZSTD_LazyVTable const* -ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) +FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( + ZSTD_matchState_t* ms, + const BYTE* ip, + const BYTE* iend, + size_t* offsetPtr, + U32 const mls, + U32 const rowLog, + searchMethod_e const searchMethod, + ZSTD_dictMode_e const dictMode) { - /* Fill the Hc/Bt VTable arrays with the right functions for the (dictMode, mls) combination. */ - ZSTD_LazyVTable const* const hcVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY); - ZSTD_LazyVTable const* const btVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_BT_VTABLE_ARRAY); - /* Fill the Row VTable array with the right functions for the (dictMode, mls, rowLog) combination. 
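/* [Editor's note, not part of the patch] A minimal sketch of the dispatch idea
 * described in the new ZSTD_searchMax() comment: when the selector is a
 * compile-time constant at every call site of an inlined dispatcher, the switch
 * folds away into a direct call, avoiding the indirect-call penalty that
 * Spectre/Meltdown mitigations (retpolines etc.) add to function pointers.
 * Names below are invented. */
typedef enum { method_hashChain = 0, method_binaryTree = 1, method_rowHash = 2 } method_e;

static int searchHC(int x)  { return x + 1; }   /* stand-ins for the real searchers */
static int searchBT(int x)  { return x * 2; }
static int searchRow(int x) { return x - 3; }

static inline int searchDispatch(method_e const method, int x)
{
    switch (method) {
    case method_hashChain:  return searchHC(x);
    case method_binaryTree: return searchBT(x);
    case method_rowHash:    return searchRow(x);
    }
    return 0;
}

/* the constant selector typically lets this compile down to `return x - 3;` */
static int searchRowOnly(int x) { return searchDispatch(method_rowHash, x); }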
*/ - ZSTD_LazyVTable const* const rowVTables[4][3][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_ROW_VTABLE_ARRAY); - - U32 const mls = MAX(4, MIN(6, ms->cParams.minMatch)); - U32 const rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); - switch (searchMethod) { - case search_hashChain: - return hcVTables[dictMode][mls - 4]; - case search_binaryTree: - return btVTables[dictMode][mls - 4]; - case search_rowHash: - return rowVTables[dictMode][mls - 4][rowLog - 4]; - default: - return NULL; + if (dictMode == ZSTD_noDict) { + ZSTD_SWITCH_SEARCH_METHOD(noDict) + } else if (dictMode == ZSTD_extDict) { + ZSTD_SWITCH_SEARCH_METHOD(extDict) + } else if (dictMode == ZSTD_dictMatchState) { + ZSTD_SWITCH_SEARCH_METHOD(dictMatchState) + } else if (dictMode == ZSTD_dedicatedDictSearch) { + ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch) } + ZSTD_UNREACHABLE; + return 0; } +/* ******************************* +* Common parser - lazy strategy +*********************************/ + FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, @@ -1488,9 +1509,11 @@ ZSTD_compressBlock_lazy_generic( const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax; - U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; + U32 offset_1 = rep[0], offset_2 = rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; const int isDMS = dictMode == ZSTD_dictMatchState; const int isDDS = dictMode == ZSTD_dedicatedDictSearch; @@ -1505,16 +1528,14 @@ ZSTD_compressBlock_lazy_generic( 0; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); - assert(searchMax != NULL); - DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod); ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { U32 const curr = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; - if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; } if (isDxS) { /* dictMatchState repCode checks don't currently handle repCode == 0 @@ -1524,7 +1545,6 @@ ZSTD_compressBlock_lazy_generic( } if (searchMethod == search_rowHash) { - const U32 rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); ZSTD_row_fillHashCache(ms, base, rowLog, MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */), ms->nextToUpdate, ilimit); @@ -1539,7 +1559,7 @@ ZSTD_compressBlock_lazy_generic( #endif while (ip < ilimit) { size_t matchLength=0; - size_t offcode=STORE_REPCODE_1; + size_t offBase = REPCODE1_TO_OFFBASE; const BYTE* start=ip+1; DEBUGLOG(7, "search baseline (depth 0)"); @@ -1564,10 +1584,10 @@ ZSTD_compressBlock_lazy_generic( } /* first search (depth 0) */ - { size_t offsetFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + { size_t offbaseFound = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); if (ml2 > matchLength) - matchLength = ml2, start = ip, offcode=offsetFound; + matchLength = ml2, start = ip, offBase = 
offbaseFound; } if (matchLength < 4) { @@ -1581,12 +1601,12 @@ ZSTD_compressBlock_lazy_generic( DEBUGLOG(7, "search depth 1"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; @@ -1598,17 +1618,17 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } } - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4); + { size_t ofbCandidate=999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; /* search a better one */ } } @@ -1617,12 +1637,12 @@ ZSTD_compressBlock_lazy_generic( DEBUGLOG(7, "search depth 2"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; @@ -1634,17 +1654,17 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } } - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7); + { size_t ofbCandidate=999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; } } } break; /* nothing found : store previous solution */ @@ -1655,24 +1675,24 @@ ZSTD_compressBlock_lazy_generic( * notably if `value` is unsigned, resulting in a large positive `-value`. */ /* catch up */ - if (STORED_IS_OFFSET(offcode)) { + if (OFFBASE_IS_OFFSET(offBase)) { if (dictMode == ZSTD_noDict) { - while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest)) - && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */ + while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest)) + && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */ { start--; matchLength++; } } if (isDxS) { - U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode)); + U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ } - offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode); + offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); } /* store sequence */ _storeSequence: { size_t const litLength = (size_t)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } @@ -1688,8 +1708,8 @@ ZSTD_compressBlock_lazy_generic( && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? 
dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; - offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; @@ -1703,16 +1723,20 @@ ZSTD_compressBlock_lazy_generic( && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ } } } - /* Save reps for next block */ - rep[0] = offset_1 ? offset_1 : savedOffset; - rep[1] = offset_2 ? offset_2 : savedOffset; + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -1881,9 +1905,9 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const dictStart = dictBase + ms->window.lowLimit; const U32 windowLog = ms->cParams.windowLog; - const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); @@ -1905,7 +1929,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( #endif while (ip < ilimit) { size_t matchLength=0; - size_t offcode=STORE_REPCODE_1; + size_t offBase = REPCODE1_TO_OFFBASE; const BYTE* start=ip+1; U32 curr = (U32)(ip-base); @@ -1924,10 +1948,10 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( } } /* first search (depth 0) */ - { size_t offsetFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); if (ml2 > matchLength) - matchLength = ml2, start = ip, offcode=offsetFound; + matchLength = ml2, start = ip, offBase = ofbCandidate; } if (matchLength < 4) { @@ -1941,7 +1965,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ip ++; curr++; /* check repCode */ - if (offcode) { + if (offBase) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; @@ -1953,18 +1977,18 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offcode = STORE_REPCODE_1, start = ip; + matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; } } /* search match, depth 1 */ - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; /* search a better one */ } } @@ -1973,7 +1997,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ip ++; curr++; /* check repCode */ - if (offcode) { + if (offBase) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; @@ -1985,36 +2009,36 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offcode = STORE_REPCODE_1, start = ip; + matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; } } /* search match, depth 2 */ - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* catch up */ - if (STORED_IS_OFFSET(offcode)) { - U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode)); + if (OFFBASE_IS_OFFSET(offBase)) { + U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; const BYTE* const mStart = (matchIndex < dictLimit) ? 
dictStart : prefixStart; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode); + offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); } /* store sequence */ _storeSequence: { size_t const litLength = (size_t)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } @@ -2031,8 +2055,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ @@ -2098,7 +2122,6 @@ size_t ZSTD_compressBlock_lazy_extDict_row( size_t ZSTD_compressBlock_lazy2_extDict_row( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) - { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2); } diff --git a/lib/compress/zstd_lazy.h b/lib/compress/zstd_lazy.h index 150f7b390b8..3bde67331e4 100644 --- a/lib/compress/zstd_lazy.h +++ b/lib/compress/zstd_lazy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,6 +25,8 @@ extern "C" { */ #define ZSTD_LAZY_DDSS_BUCKET_LOG 2 +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ + U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip); @@ -116,7 +118,7 @@ size_t ZSTD_compressBlock_lazy2_extDict_row( size_t ZSTD_compressBlock_btlazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - + #if defined (__cplusplus) } diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c index f662b2546e0..3d74ff19e3c 100644 --- a/lib/compress/zstd_ldm.c +++ b/lib/compress/zstd_ldm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -242,11 +242,11 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, switch(ms->cParams.strategy) { case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); + ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); break; case ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); + ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); break; case ZSTD_greedy: @@ -549,7 +549,7 @@ size_t ZSTD_ldm_generateSequences( * the window through early invalidation. * TODO: * Test the chunk size. * * Try invalidation after the sequence generation and test the - * the offset against maxDist directly. + * offset against maxDist directly. 
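/* [Editor's note, not part of the patch] The STORE_OFFSET / STORE_REPCODE to
 * OFFSET_TO_OFFBASE / REPCODE1_TO_OFFBASE renaming used throughout this hunk
 * follows one numeric "sumtype" convention, sketched below for illustration
 * (assuming ZSTD_REP_NUM == 3): offBase values 1..3 encode repcodes 1..3,
 * larger values encode a real offset shifted by 3. Names are invented. */
#include <assert.h>

#define REP_NUM 3  /* stands in for ZSTD_REP_NUM */

static unsigned offsetToOffBase(unsigned offset)   { assert(offset > 0); return offset + REP_NUM; }
static unsigned repcodeToOffBase(unsigned repcode) { assert(1 <= repcode && repcode <= REP_NUM); return repcode; }
static int      offBaseIsOffset(unsigned offBase)  { return offBase > REP_NUM; }
static unsigned offBaseToOffset(unsigned offBase)  { assert(offBaseIsOffset(offBase)); return offBase - REP_NUM; }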
* * NOTE: Because of dictionaries + sequence splitting we MUST make sure * that any offset used is valid at the END of the sequence, since it may @@ -711,7 +711,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, rep[0] = sequence.offset; /* Store the sequence */ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, - STORE_OFFSET(sequence.offset), + OFFSET_TO_OFFBASE(sequence.offset), sequence.matchLength); ip += sequence.matchLength; } diff --git a/lib/compress/zstd_ldm.h b/lib/compress/zstd_ldm.h index 4e68dbf52e3..f147021d296 100644 --- a/lib/compress/zstd_ldm.h +++ b/lib/compress/zstd_ldm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstd_ldm_geartab.h b/lib/compress/zstd_ldm_geartab.h index 647f865be29..ef34bc5c923 100644 --- a/lib/compress/zstd_ldm_geartab.h +++ b/lib/compress/zstd_ldm_geartab.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c index 1b1ddad4289..fdd7f9d8b5a 100644 --- a/lib/compress/zstd_opt.c +++ b/lib/compress/zstd_opt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,7 @@ #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ #define ZSTD_MAX_PRICE (1<<30) -#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ +#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ /*-************************************* @@ -26,27 +26,35 @@ #if 0 /* approximation at bit level (for tests) */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat)) +# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat)) #elif 0 /* fractional bit accuracy (for tests) */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) +# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat)) #else /* opt==approx, ultra==accurate */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) +# define WEIGHT(stat,opt) ((opt) ? 
ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) #endif +/* ZSTD_bitWeight() : + * provide estimated "cost" of a stat in full bits only */ MEM_STATIC U32 ZSTD_bitWeight(U32 stat) { return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); } +/* ZSTD_fracWeight() : + * provide fractional-bit "cost" of a stat, + * using linear interpolation approximation */ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) { U32 const stat = rawStat + 1; U32 const hb = ZSTD_highbit32(stat); U32 const BWeight = hb * BITCOST_MULTIPLIER; + /* Fweight was meant for "Fractional weight" + * but it's effectively a value between 1 and 2 + * using fixed point arithmetic */ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; U32 const weight = BWeight + FWeight; assert(hb + BITCOST_ACCURACY < 31); @@ -57,7 +65,7 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) /* debugging function, * @return price in bytes as fractional value * for debug messages only */ -MEM_STATIC double ZSTD_fCost(U32 price) +MEM_STATIC double ZSTD_fCost(int price) { return (double)price / (BITCOST_MULTIPLIER*8); } @@ -88,20 +96,26 @@ static U32 sum_u32(const unsigned table[], size_t nbElts) return total; } -static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift) +typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e; + +static U32 +ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1) { U32 s, sum=0; - DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift); + DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", + (unsigned)lastEltIndex+1, (unsigned)shift ); assert(shift < 30); for (s=0; s> shift); - sum += table[s]; + unsigned const base = base1 ? 1 : (table[s]>0); + unsigned const newStat = base + (table[s] >> shift); + sum += newStat; + table[s] = newStat; } return sum; } /* ZSTD_scaleStats() : - * reduce all elements in table is sum too large + * reduce all elt frequencies in table if sum too large * return the resulting sum of elements */ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) { @@ -110,7 +124,7 @@ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); assert(logTarget < 30); if (factor <= 1) return prevsum; - return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor)); + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed); } /* ZSTD_rescaleFreqs() : @@ -129,18 +143,22 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); optPtr->priceType = zop_dynamic; - if (optPtr->litLengthSum == 0) { /* first block : init */ - if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ - DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); + if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */ + + /* heuristic: use pre-defined stats for too small inputs */ + if (srcSize <= ZSTD_PREDEF_THRESHOLD) { + DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD); optPtr->priceType = zop_predef; } assert(optPtr->symbolCosts != NULL); if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { - /* huffman table presumed generated by dictionary */ + + /* huffman stats covering the full value set : table presumed generated by dictionary */ optPtr->priceType = zop_dynamic; if (compressedLiterals) { + /* generate 
literals statistics from huffman table */ unsigned lit; assert(optPtr->litFreq != NULL); optPtr->litSum = 0; @@ -188,13 +206,14 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } - } else { /* not a dictionary */ + } else { /* first block, no dictionary */ assert(optPtr->litFreq != NULL); if (compressedLiterals) { + /* base initial cost of literals on direct frequency within src */ unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible); } { unsigned const baseLLfreqs[MaxLL+1] = { @@ -224,10 +243,9 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); } - } - } else { /* new block : re-use previous statistics, scaled down */ + } else { /* new block : scale down accumulated statistics */ if (compressedLiterals) optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); @@ -255,11 +273,14 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */ /* dynamic statistics */ - { U32 price = litLength * optPtr->litSumBasePrice; + { U32 price = optPtr->litSumBasePrice * litLength; + U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER; U32 u; + assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER); for (u=0; u < litLength; u++) { - assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */ - price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel); + U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel); + if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax; + price -= litPrice; } return price; } @@ -272,10 +293,11 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP assert(litLength <= ZSTD_BLOCKSIZE_MAX); if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); - /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX - * because it isn't representable in the zstd format. So instead just - * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block - * would be all literals. + + /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX + * because it isn't representable in the zstd format. + * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. + * In such a case, the block would be all literals. */ if (litLength == ZSTD_BLOCKSIZE_MAX) return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel); @@ -289,24 +311,25 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP } /* ZSTD_getMatchPrice() : - * Provides the cost of the match part (offset + matchLength) of a sequence + * Provides the cost of the match part (offset + matchLength) of a sequence. * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. 
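/* [Editor's note, not part of the patch] A standalone approximation of the
 * WEIGHT() and ZSTD_fracWeight() cost model documented above, illustration only:
 * an 8-bit fixed-point estimate of log2(stat), where FWeight is the "value
 * between 1 and 2" mentioned in the new comment. The real code additionally
 * asserts that stat is small enough for the shift not to overflow. */
#include <stdint.h>

static unsigned highbitPos(uint32_t v) { unsigned n = 0; while (v >>= 1) n++; return n; }

static uint32_t fracWeightSketch(uint32_t rawStat)   /* assumes rawStat well below 2^23 */
{
    uint32_t const stat    = rawStat + 1;
    uint32_t const hb      = highbitPos(stat);
    uint32_t const BWeight = hb << 8;             /* integer part of log2(stat), scaled by 256   */
    uint32_t const FWeight = (stat << 8) >> hb;   /* in [256, 512): 1.0 .. 2.0 in fixed point    */
    return BWeight + FWeight;                     /* approximately 256 * (log2(stat) + 1)        */
}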
- * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2 + * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ FORCE_INLINE_TEMPLATE U32 -ZSTD_getMatchPrice(U32 const offcode, +ZSTD_getMatchPrice(U32 const offBase, U32 const matchLength, const optState_t* const optPtr, int const optLevel) { U32 price; - U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode)); + U32 const offCode = ZSTD_highbit32(offBase); U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); - if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ - return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); + if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */ + return WEIGHT(mlBase, optLevel) + + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */ /* dynamic statistics */ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); @@ -325,10 +348,10 @@ ZSTD_getMatchPrice(U32 const offcode, } /* ZSTD_updateStats() : - * assumption : literals + litLengtn <= iend */ + * assumption : literals + litLength <= iend */ static void ZSTD_updateStats(optState_t* const optPtr, U32 litLength, const BYTE* literals, - U32 offsetCode, U32 matchLength) + U32 offBase, U32 matchLength) { /* literals */ if (ZSTD_compressedLiterals(optPtr)) { @@ -344,8 +367,8 @@ static void ZSTD_updateStats(optState_t* const optPtr, optPtr->litLengthSum++; } - /* offset code : expected to follow storeSeq() numeric representation */ - { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode)); + /* offset code : follows storeSeq() numeric representation */ + { U32 const offCode = ZSTD_highbit32(offBase); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; optPtr->offCodeSum++; @@ -552,16 +575,17 @@ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } -FORCE_INLINE_TEMPLATE -U32 ZSTD_insertBtAndGetAllMatches ( - ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ - const U32 lengthToBeat, - U32 const mls /* template */) +FORCE_INLINE_TEMPLATE U32 +ZSTD_insertBtAndGetAllMatches ( + ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ + ZSTD_matchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip, const BYTE* const iLimit, + const ZSTD_dictMode_e dictMode, + const U32 rep[ZSTD_REP_NUM], + const U32 ll0, /* tells if associated literal length is 0 or not. 
This value must be 0 or 1 */ + const U32 lengthToBeat, + const U32 mls /* template */) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); @@ -644,7 +668,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); bestLength = repLen; - matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */ + matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */ matches[mnum].len = (U32)repLen; mnum++; if ( (repLen > sufficient_len) @@ -673,7 +697,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( bestLength = mlen; assert(curr > matchIndex3); assert(mnum==0); /* no prior solution */ - matches[0].off = STORE_OFFSET(curr - matchIndex3); + matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3); matches[0].len = (U32)mlen; mnum = 1; if ( (mlen > sufficient_len) | @@ -706,13 +730,13 @@ U32 ZSTD_insertBtAndGetAllMatches ( } if (matchLength > bestLength) { - DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", - (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex)); + DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)", + (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); assert(matchEndIdx > matchIndex); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = STORE_OFFSET(curr - matchIndex); + matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) @@ -754,12 +778,12 @@ U32 ZSTD_insertBtAndGetAllMatches ( if (matchLength > bestLength) { matchIndex = dictMatchIndex + dmsIndexDelta; - DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", - (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex)); + DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)", + (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = STORE_OFFSET(curr - matchIndex); + matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) @@ -960,7 +984,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) { U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; - /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */ + /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; /* Ensure that current block position is not outside of the match */ @@ -971,11 +995,11 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, } if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { - U32 const candidateOffCode = STORE_OFFSET(optLdm->offset); - DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u", - candidateOffCode, candidateMatchLength, currPosInBlock); + U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset); + 
DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u", + candidateOffBase, candidateMatchLength, currPosInBlock); matches[*nbMatches].len = candidateMatchLength; - matches[*nbMatches].off = candidateOffCode; + matches[*nbMatches].off = candidateOffBase; (*nbMatches)++; } } @@ -1098,14 +1122,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; - U32 const maxOffcode = matches[nbMatches-1].off; - DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", - nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart)); + U32 const maxOffBase = matches[nbMatches-1].off; + DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series", + nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { lastSequence.litlen = litlen; lastSequence.mlen = maxML; - lastSequence.off = maxOffcode; + lastSequence.off = maxOffBase; DEBUGLOG(6, "large match (%u>%u), immediate encoding", maxML, sufficient_len); cur = 0; @@ -1122,15 +1146,15 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ } for (matchNb = 0; matchNb < nbMatches; matchNb++) { - U32 const offcode = matches[matchNb].off; + U32 const offBase = matches[matchNb].off; U32 const end = matches[matchNb].len; for ( ; pos <= end ; pos++ ) { - U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel); + U32 const matchPrice = ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); U32 const sequencePrice = literalsPrice + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", - pos, ZSTD_fCost(sequencePrice)); + pos, ZSTD_fCost((int)sequencePrice)); opt[pos].mlen = pos; - opt[pos].off = offcode; + opt[pos].off = offBase; opt[pos].litlen = litlen; opt[pos].price = (int)sequencePrice; } } @@ -1230,7 +1254,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch; U32 mlen; - DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u", + DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u", matchNb, matches[matchNb].off, lastML, litlen); for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ @@ -1296,7 +1320,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, for (storePos=storeStart; storePos <= storeEnd; storePos++) { U32 const llen = opt[storePos].litlen; U32 const mlen = opt[storePos].mlen; - U32 const offCode = opt[storePos].off; + U32 const offBase = opt[storePos].off; U32 const advance = llen + mlen; DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", anchor - istart, (unsigned)llen, (unsigned)mlen); @@ -1308,8 +1332,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, } assert(anchor + llen <= iend); - ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); - ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen); + ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); anchor += advance; ip = anchor; } } @@ -1349,7 +1373,7 @@ size_t ZSTD_compressBlock_btopt( /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. 
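/* [Editor's note, not part of the patch] Shape of the 2-pass idea behind
 * ZSTD_initStats_ultra() and btultra2, reduced to a toy for illustration: the
 * first pass exists only to populate frequency statistics, its output is
 * discarded, and the real pass then starts from realistic prices instead of
 * the predefined defaults. Names are invented. */
#include <stddef.h>

typedef struct { unsigned litFreq[256]; } StatsSketch;

static size_t passSketch(StatsSketch* stats, const unsigned char* src, size_t srcSize)
{   size_t i;
    for (i = 0; i < srcSize; i++) stats->litFreq[src[i]]++;   /* stand-in for stats collection */
    return srcSize;                                           /* stand-in for compressed size  */
}

static size_t twoPassSketch(StatsSketch* stats, const unsigned char* src, size_t srcSize)
{
    (void)passSketch(stats, src, srcSize);   /* pass 1: output ignored, statistics kept      */
    return passSketch(stats, src, srcSize);  /* pass 2: benefits from the seeded statistics  */
}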
- * this function cannot error, hence its contract must be respected. + * this function cannot error out, its narrow contract must be respected. */ static void ZSTD_initStats_ultra(ZSTD_matchState_t* ms, @@ -1368,7 +1392,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ - /* invalidate first scan from history */ + /* invalidate first scan from history, only keep entropy stats */ ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; @@ -1392,20 +1416,20 @@ size_t ZSTD_compressBlock_btultra2( U32 const curr = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); - /* 2-pass strategy: + /* 2-passes strategy: * this strategy makes a first pass over first block to collect statistics - * and seed next round's statistics with it. - * After 1st pass, function forgets everything, and starts a new block. + * in order to seed next round's statistics with it. + * After 1st pass, function forgets history, and starts a new block. * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing. * The compression ratio gain is generally small (~0.5% on first block), - * the cost is 2x cpu time on first block. */ + ** the cost is 2x cpu time on first block. */ assert(srcSize <= ZSTD_BLOCKSIZE_MAX); if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ - && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ - && (srcSize > ZSTD_PREDEF_THRESHOLD) + && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ + && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */ ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } diff --git a/lib/compress/zstd_opt.h b/lib/compress/zstd_opt.h index 627255f53de..342e5a31127 100644 --- a/lib/compress/zstd_opt.h +++ b/lib/compress/zstd_opt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c index 6bc14b035e1..7a2c71720a1 100644 --- a/lib/compress/zstdmt_compress.c +++ b/lib/compress/zstdmt_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -266,11 +266,11 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) * 1 buffer for input loading * 1 buffer for "next input" when submitting current one * 1 buffer stuck in queue */ -#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3 +#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) /* After a worker releases its rawSeqStore, it is immediately ready for reuse. * So we only need one seq buffer per worker. 
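/* [Editor's note, not part of the patch] Why BUF_POOL_MAX_NB_BUFFERS gained
 * parentheses above: without them, the expansion interacts with surrounding
 * operators. Toy illustration only: */
#define NB_BUFFERS_BAD(n)  2*n + 3
#define NB_BUFFERS_GOOD(n) (2*(n) + 3)

static int parenDemo(void)
{
    int const a = NB_BUFFERS_BAD(4) * 10;   /* expands to 2*4 + 3*10 == 38, not 110 */
    int const b = NB_BUFFERS_GOOD(4) * 10;  /* expands to (2*(4) + 3) * 10 == 110   */
    return b - a;
}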
*/ -#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers +#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers) /* ===== Seq Pool Wrapper ====== */ @@ -1734,7 +1734,7 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) } } else { /* We have enough bytes buffered to initialize the hash, - * and are have processed enough bytes to find a sync point. + * and have processed enough bytes to find a sync point. * Start scanning at the beginning of the input. */ assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); @@ -1761,17 +1761,24 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) * then a block will be emitted anyways, but this is okay, since if we * are already synchronized we will remain synchronized. */ + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; - assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); + /* This assert is very expensive, and Debian compiles with asserts enabled. + * So disable it for now. We can get similar coverage by checking it at the + * beginning & end of the loop. + * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); + */ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; + ++pos; /* for assert */ break; } } + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); return syncPoint; } diff --git a/lib/compress/zstdmt_compress.h b/lib/compress/zstdmt_compress.h index 271eb1ac71f..ed4dc0e99df 100644 --- a/lib/compress/zstdmt_compress.h +++ b/lib/compress/zstdmt_compress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/decompress/huf_decompress.c b/lib/decompress/huf_decompress.c index 2027188255e..c2d1f633a49 100644 --- a/lib/decompress/huf_decompress.c +++ b/lib/decompress/huf_decompress.c @@ -1,7 +1,7 @@ /* ****************************************************************** * huff0 huffman decoder, * part of Finite State Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -19,10 +19,10 @@ #include "../common/compiler.h" #include "../common/bitstream.h" /* BIT_* */ #include "../common/fse.h" /* to compress headers */ -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" #include "../common/zstd_internal.h" +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */ /* ************************************************************** * Constants @@ -43,10 +43,14 @@ #error "Cannot force the use of the X1 and X2 decoders at the same time!" 
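/* [Editor's note, not part of the patch] The findSynchronizationPoint() loop
 * above advances a multiplicative rolling hash one byte at a time. A generic
 * sketch of such a "rotate" step, for illustration only, with invented
 * constants (zstd's real prime and character offset differ): primePower must
 * equal prime^(windowLength-1) so the leaving byte's contribution cancels
 * exactly. */
#include <stdint.h>

#define ROLL_PRIME  0x9E3779B97F4A7C15ULL  /* illustrative multiplier only */
#define CHAR_OFFSET 10                     /* keeps runs of zero bytes from hashing to zero */

static uint64_t rollingHashRotate(uint64_t hash, uint8_t leaving, uint8_t entering, uint64_t primePower)
{
    hash -= (uint64_t)(leaving + CHAR_OFFSET) * primePower;  /* remove the byte leaving the window */
    hash *= ROLL_PRIME;                                      /* shift remaining bytes one position */
    hash += (uint64_t)(entering + CHAR_OFFSET);              /* add the byte entering the window   */
    return hash;
}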
#endif -#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2 -# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE +/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is + * supported at runtime, so we can add the BMI2 target attribute. + * When it is disabled, we will still get BMI2 if it is enabled statically. + */ +#if DYNAMIC_BMI2 +# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE #else -# define HUF_ASM_X86_64_BMI2_ATTRS +# define HUF_FAST_BMI2_ATTRS #endif #ifdef __cplusplus @@ -56,18 +60,12 @@ #endif #define HUF_ASM_DECL HUF_EXTERN_C -#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +#if DYNAMIC_BMI2 # define HUF_NEED_BMI2_FUNCTION 1 #else # define HUF_NEED_BMI2_FUNCTION 0 #endif -#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) -# define HUF_NEED_DEFAULT_FUNCTION 1 -#else -# define HUF_NEED_DEFAULT_FUNCTION 0 -#endif - /* ************************************************************** * Error Management ****************************************************************/ @@ -84,6 +82,11 @@ /* ************************************************************** * BMI2 Variant Wrappers ****************************************************************/ +typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize, + const void *cSrc, + size_t cSrcSize, + const HUF_DTable *DTable); + #if DYNAMIC_BMI2 #define HUF_DGEN(fn) \ @@ -105,9 +108,9 @@ } \ \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - if (bmi2) { \ + if (flags & HUF_flags_bmi2) { \ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ @@ -117,9 +120,9 @@ #define HUF_DGEN(fn) \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - (void)bmi2; \ + (void)flags; \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } @@ -138,15 +141,28 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) return dtd; } -#if ZSTD_ENABLE_ASM_X86_64_BMI2 - -static size_t HUF_initDStream(BYTE const* ip) { +static size_t HUF_initFastDStream(BYTE const* ip) { BYTE const lastByte = ip[7]; - size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; size_t const value = MEM_readLEST(ip) | 1; assert(bitsConsumed <= 8); + assert(sizeof(size_t) == 8); return value << bitsConsumed; } + + +/** + * The input/output arguments to the Huffman fast decoding loop: + * + * ip [in/out] - The input pointers, must be updated to reflect what is consumed. + * op [in/out] - The output pointers, must be updated to reflect what is written. + * bits [in/out] - The bitstream containers, must be updated to reflect the current state. + * dt [in] - The decoding table. + * ilimit [in] - The input limit, stop when any input pointer is below ilimit. + * oend [in] - The end of the output stream. op[3] must not cross oend. + * iend [in] - The end of each input stream. ip[i] may cross iend[i], + * as long as it is above ilimit, but that indicates corruption. 
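HUF_initFastDStream() above encodes the whole bit-container convention of the fast loops: the stream's last byte carries zero padding plus a 1 "end mark", those bits are shifted out so the first symbol sits at the top of the 64-bit container, and the OR with 1 plants a sentinel so a trailing-zero count always equals the number of bits consumed so far. A standalone restatement with placeholder helpers (readLE64, highBit8, initFastDStream are illustrative names) and an assumed 64-bit little-endian host, which is the only case the fast path accepts anyway:

#include <stdint.h>
#include <string.h>

static uint64_t readLE64(const unsigned char* p)
{
    uint64_t v;
    memcpy(&v, p, sizeof(v));   /* little-endian host assumed, as in the fast loop */
    return v;
}

static int highBit8(unsigned v)   /* index of the highest set bit, v in [1,255] */
{
    int n = 0;
    while (v >>= 1) n++;
    return n;
}

static uint64_t initFastDStream(const unsigned char* ip)
{
    unsigned const lastByte = ip[7];
    /* e.g. lastByte == 0x16 (00010110b): 3 padding zeros plus the end mark
     * occupy the top 4 bits, so 4 bits are consumed up front. */
    int const bitsConsumed = lastByte ? 8 - highBit8(lastByte) : 0;
    uint64_t const value = readLE64(ip) | 1;  /* plant the sentinel bit */
    return value << bitsConsumed;             /* trailing-zero count == bitsConsumed */
}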
+ */ typedef struct { BYTE const* ip[4]; BYTE* op[4]; @@ -155,15 +171,17 @@ typedef struct { BYTE const* ilimit; BYTE* oend; BYTE const* iend[4]; -} HUF_DecompressAsmArgs; +} HUF_DecompressFastArgs; + +typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*); /** - * Initializes args for the asm decoding loop. - * @returns 0 on success - * 1 if the fallback implementation should be used. + * Initializes args for the fast decoding loop. + * @returns 1 on success + * 0 if the fallback implementation should be used. * Or an error code on failure. */ -static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) +static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) { void const* dt = DTable + 1; U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; @@ -172,9 +190,11 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, BYTE* const oend = (BYTE*)dst + dstSize; - /* The following condition is false on x32 platform, - * but HUF_asm is not compatible with this ABI */ - if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1; + /* The fast decoding loop assumes 64-bit little-endian. + * This condition is false on x32. + */ + if (!MEM_isLittleEndian() || MEM_32bits()) + return 0; /* strict minimum : jump table + 1 byte per stream */ if (srcSize < 10) @@ -185,7 +205,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. */ if (dtLog != HUF_DECODER_FAST_TABLELOG) - return 1; + return 0; /* Read the jump table. */ { @@ -199,13 +219,13 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, args->iend[2] = args->iend[1] + length2; args->iend[3] = args->iend[2] + length3; - /* HUF_initDStream() requires this, and this small of an input + /* HUF_initFastDStream() requires this, and this small of an input * won't benefit from the ASM loop anyways. * length1 must be >= 16 so that ip[0] >= ilimit before the loop * starts. */ if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8) - return 1; + return 0; if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ } /* ip[] contains the position that is currently loaded into bits[]. */ @@ -222,7 +242,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, /* No point to call the ASM loop for tiny outputs. */ if (args->op[3] >= oend) - return 1; + return 0; /* bits[] is the bit container. * It is read from the MSB down to the LSB. @@ -231,10 +251,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, * set, so that CountTrailingZeros(bits[]) can be used * to count how many bits we've consumed. */ - args->bits[0] = HUF_initDStream(args->ip[0]); - args->bits[1] = HUF_initDStream(args->ip[1]); - args->bits[2] = HUF_initDStream(args->ip[2]); - args->bits[3] = HUF_initDStream(args->ip[3]); + args->bits[0] = HUF_initFastDStream(args->ip[0]); + args->bits[1] = HUF_initFastDStream(args->ip[1]); + args->bits[2] = HUF_initFastDStream(args->ip[2]); + args->bits[3] = HUF_initFastDStream(args->ip[3]); /* If ip[] >= ilimit, it is guaranteed to be safe to * reload bits[]. 
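HUF_DecompressFastArgs_init() above rejects anything the fast loop cannot handle (wrong endianness or word size, tiny inputs, a table log other than HUF_DECODER_FAST_TABLELOG) and otherwise carves the source into the four streams described by the 6-byte jump table: three little-endian 16-bit lengths, the fourth length implied by whatever remains. A simplified, self-contained sketch of just that carving step (StreamLayout, splitStreams and readLE16 are illustrative names; every error collapses to a 0 return here):

#include <stddef.h>
#include <stdint.h>

typedef struct {
    const unsigned char* start[4];
    size_t length[4];
} StreamLayout;

static uint16_t readLE16(const unsigned char* p) { return (uint16_t)(p[0] | (p[1] << 8)); }

static int splitStreams(StreamLayout* s, const unsigned char* src, size_t srcSize)
{
    size_t l1, l2, l3, l4;
    if (srcSize < 10) return 0;               /* jump table + at least 1 byte per stream */
    l1 = readLE16(src + 0);
    l2 = readLE16(src + 2);
    l3 = readLE16(src + 4);
    if (6 + l1 + l2 + l3 > srcSize) return 0; /* lengths would overflow the source */
    l4 = srcSize - 6 - l1 - l2 - l3;
    s->start[0] = src + 6;
    s->start[1] = s->start[0] + l1;
    s->start[2] = s->start[1] + l2;
    s->start[3] = s->start[2] + l3;
    s->length[0] = l1; s->length[1] = l2; s->length[2] = l3; s->length[3] = l4;
    return 1;
}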
It may be beyond its section, but is @@ -245,10 +265,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, args->oend = oend; args->dt = dt; - return 0; + return 1; } -static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd) +static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd) { /* Validate that we haven't overwritten. */ if (args->op[stream] > segmentEnd) @@ -262,15 +282,15 @@ static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs return ERROR(corruption_detected); /* Construct the BIT_DStream_t. */ - bit->bitContainer = MEM_readLE64(args->ip[stream]); - bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]); + assert(sizeof(size_t) == 8); + bit->bitContainer = MEM_readLEST(args->ip[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); bit->start = (const char*)args->iend[0]; bit->limitPtr = bit->start + sizeof(size_t); bit->ptr = (const char*)args->ip[stream]; return 0; } -#endif #ifndef HUF_FORCE_DECOMPRESS_X2 @@ -287,10 +307,11 @@ typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decodi static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { U64 D4; if (MEM_isLittleEndian()) { - D4 = (symbol << 8) + nbBits; + D4 = (U64)((symbol << 8) + nbBits); } else { - D4 = symbol + (nbBits << 8); + D4 = (U64)(symbol + (nbBits << 8)); } + assert(D4 < (1U << 16)); D4 *= 0x0001000100010001ULL; return D4; } @@ -333,13 +354,7 @@ typedef struct { BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; } HUF_ReadDTableX1_Workspace; - -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) -{ - return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - -size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags) { U32 tableLog = 0; U32 nbSymbols = 0; @@ -354,7 +369,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ - iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2); + iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags); if (HUF_isError(iSize)) return iSize; @@ -381,9 +396,8 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr * rankStart[0] is not filled because there are no entries in the table for * weight 0. */ - { - int n; - int nextRankStart = 0; + { int n; + U32 nextRankStart = 0; int const unroll = 4; int const nLimit = (int)nbSymbols - unroll + 1; for (n=0; n<(int)tableLog+1; n++) { @@ -410,10 +424,9 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr * We can switch based on the length to a different inner loop which is * optimized for that particular case. 
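The HUF_DEltX1_set4() change above only adds a cast and an assert, but the trick it guards is worth spelling out: a 16-bit entry (symbol in the high byte, bit count in the low byte) is broadcast into four adjacent table cells by one multiply with 0x0001000100010001, which cannot carry between lanes precisely because the entry fits in 16 bits. A small standalone check of that arithmetic (values are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t const symbol = 0x41;  /* 'A' */
    uint8_t const nbBits = 5;
    uint64_t d4 = (uint64_t)((symbol << 8) + nbBits);  /* 0x4105, fits in 16 bits */
    d4 *= 0x0001000100010001ULL;                       /* replicate into 4 lanes */
    printf("%016llx\n", (unsigned long long)d4);       /* prints 4105410541054105 */
    return 0;
}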
*/ - { - U32 w; - int symbol=wksp->rankVal[0]; - int rankStart=0; + { U32 w; + int symbol = wksp->rankVal[0]; + int rankStart = 0; for (w=1; wrankVal[w]; int const length = (1 << w) >> 1; @@ -523,7 +536,7 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons while (p < pEnd) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); - return pEnd-pStart; + return (size_t)(pEnd-pStart); } FORCE_INLINE_TEMPLATE size_t @@ -549,6 +562,10 @@ HUF_decompress1X1_usingDTable_internal_body( return dstSize; } +/* HUF_decompress4X1_usingDTable_internal_body(): + * Conditions : + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body( void* dst, size_t dstSize, @@ -592,6 +609,7 @@ HUF_decompress4X1_usingDTable_internal_body( if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); @@ -654,38 +672,142 @@ size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo } #endif -#if HUF_NEED_DEFAULT_FUNCTION static size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, size_t cSrcSize, HUF_DTable const* DTable) { return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -#endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 -HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; +HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; -static HUF_ASM_X86_64_BMI2_ATTRS +#endif + +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) +{ + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + U16 const* const dtable = (U16 const*)args->dt; + BYTE* const oend = args->oend; + BYTE const* const ilimit = args->ilimit; + + /* Copy the arguments to local variables */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy(&ip, &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + int symbol; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= (stream == 3 ? oend : op[stream + 1])); + assert(ip[stream] >= ilimit); + } +#endif + /* Compute olimit */ + { + /* Each iteration produces 5 output symbols per stream */ + size_t const oiters = (size_t)(oend - op[3]) / 5; + /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes + * per stream. + */ + size_t const iiters = (size_t)(ip[0] - ilimit) / 7; + /* We can safely run iters iterations before running bounds checks */ + size_t const iters = MIN(oiters, iiters); + size_t const symbols = iters * 5; + + /* We can simply check that op[3] < olimit, instead of checking all + * of our bounds, since we can't hit the other bounds until we've run + * iters iterations, which only happens when op[3] == olimit. + */ + olimit = op[3] + symbols; + + /* Exit fast decoding loop once we get close to the end. */ + if (op[3] + 20 > olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. 
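The olimit computation above is the whole safety argument of the 4X1 fast loop: with an 11-bit table, one iteration decodes 5 symbols per stream, consuming at most 5 * 11 = 55 bits (budgeted as 7 bytes) of input and producing exactly 5 output bytes per stream, so bounds are only re-checked once per batch of iterations instead of per symbol. A standalone restatement of that bound (safeIterations is an illustrative name, not zstd code):

#include <stddef.h>

/* input  per stream per iteration : 5 symbols * 11 bits = 55 bits <= 7 bytes
 * output per stream per iteration : 5 symbols * 1 byte  =  5 bytes           */
static size_t safeIterations(size_t inputAvail, size_t outputAvail)
{
    size_t const iiters = inputAvail / 7;   /* worst-case input consumption */
    size_t const oiters = outputAvail / 5;  /* exact output production */
    return iiters < oiters ? iiters : oiters;
}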
This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. + */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + + do { + /* Decode 5 symbols in each of the 4 streams */ + for (symbol = 0; symbol < 5; ++symbol) { + for (stream = 0; stream < 4; ++stream) { + int const index = (int)(bits[stream] >> 53); + int const entry = (int)dtable[index]; + bits[stream] <<= (entry & 63); + op[stream][symbol] = (BYTE)((entry >> 8) & 0xFF); + } + } + /* Reload the bitstreams */ + for (stream = 0; stream < 4; ++stream) { + int const ctz = ZSTD_countTrailingZeros64(bits[stream]); + int const nbBits = ctz & 7; + int const nbBytes = ctz >> 3; + op[stream] += 5; + ip[stream] -= nbBytes; + bits[stream] = MEM_read64(ip[stream]) | 1; + bits[stream] <<= nbBits; + } + } while (op[3] < olimit); + } + +_out: + + /* Save the final values of each of the state variables back to args. */ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy(&args->ip, &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); +} + +/** + * @returns @p dstSize on success (>= 6) + * 0 if the fallback implementation should be used + * An error if an error occurred + */ +static HUF_FAST_BMI2_ATTRS size_t -HUF_decompress4X1_usingDTable_internal_bmi2_asm( +HUF_decompress4X1_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { void const* dt = DTable + 1; const BYTE* const iend = (const BYTE*)cSrc + 6; BYTE* const oend = (BYTE*)dst + dstSize; - HUF_DecompressAsmArgs args; - { - size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); - FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret != 0) - return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + HUF_DecompressFastArgs args; + { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init fast loop args"); + if (ret == 0) + return 0; } assert(args.ip[0] >= args.ilimit); - HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args); + loopFn(&args); /* Our loop guarantees that ip[] >= ilimit and that we haven't * overwritten any op[]. @@ -698,8 +820,7 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm( (void)iend; /* finish bit streams one by one. 
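The loop body above interleaves two ideas: a table lookup keyed by the top 11 bits of the container, and a reload that exploits the sentinel bit so one trailing-zero count recovers exactly how far the stream advanced, split into whole bytes (pointer rewind) and leftover bits (re-shift). A single-stream, self-contained sketch of both steps (FastStream, decodeOne, reload, load64LE and trailingZeros64 are illustrative names; the real code drives four streams at once and may run the assembly loop on x86-64 instead):

#include <stdint.h>
#include <string.h>

static uint64_t load64LE(const unsigned char* p)   /* little-endian host assumed */
{
    uint64_t v; memcpy(&v, p, sizeof(v)); return v;
}

static int trailingZeros64(uint64_t v)
{
    int n = 0;
    if (v == 0) return 64;
    while (!(v & 1)) { v >>= 1; n++; }
    return n;
}

typedef struct {
    uint64_t bits;              /* bit container, next symbol's bits at the MSB end */
    const unsigned char* ip;    /* points at the 8 source bytes currently loaded */
} FastStream;

static unsigned char decodeOne(FastStream* s, const uint16_t* dtable)  /* 2048-entry X1 table */
{
    unsigned const index = (unsigned)(s->bits >> 53);   /* top 11 bits index the table */
    unsigned const entry = dtable[index];
    s->bits <<= (entry & 63);                           /* shift out that symbol's bits */
    return (unsigned char)(entry >> 8);                 /* decoded byte lives in the high byte */
}

static void reload(FastStream* s)
{
    int const consumed = trailingZeros64(s->bits);      /* total bits consumed, thanks to the sentinel */
    s->ip   -= consumed >> 3;                           /* rewind over the whole bytes */
    s->bits  = load64LE(s->ip) | 1;                     /* refill and replant the sentinel */
    s->bits <<= (consumed & 7);                         /* re-consume the leftover bits */
}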
*/ - { - size_t const segmentSize = (dstSize+3) / 4; + { size_t const segmentSize = (dstSize+3) / 4; BYTE* segmentEnd = (BYTE*)dst; int i; for (i = 0; i < 4; ++i) { @@ -716,97 +837,59 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm( } /* decoded size */ + assert(dstSize != 0); return dstSize; } -#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ - -typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, - const void *cSrc, - size_t cSrcSize, - const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) + size_t cSrcSize, HUF_DTable const* DTable, int flags) { + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop; + #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2; # if ZSTD_ENABLE_ASM_X86_64_BMI2 - return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -# else - return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } # endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -#else - (void)bmi2; #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -#else - return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } #endif -} - -size_t HUF_decompress1X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -} - -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} - - -size_t HUF_decompress4X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + if (!(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t 
wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); -} - -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } - #endif /* HUF_FORCE_DECOMPRESS_X2 */ @@ -989,7 +1072,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, - const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32* const rankVal = rankValOrigin[0]; @@ -1044,14 +1127,7 @@ typedef struct { size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - -size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, - const void* src, size_t srcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { U32 tableLog, maxW, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); @@ -1073,7 +1149,7 @@ size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
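The decoder entry points above replace the old `int bmi2` argument with a flags word and pick between a scalar fallback and a fast loop (C or assembly) from it. A deliberately simplified sketch of that shape, using placeholder flag names and values standing in for HUF_flags_bmi2 / HUF_flags_disableAsm / HUF_flags_disableFast; the real HUF_decompress4X1_usingDTable_internal() additionally returns straight to the default decoder when DYNAMIC_BMI2 is set and BMI2 is absent at runtime:

#include <stddef.h>

#define FLAG_BMI2          (1 << 0)   /* stands in for HUF_flags_bmi2 */
#define FLAG_DISABLE_ASM   (1 << 1)   /* stands in for HUF_flags_disableAsm */
#define FLAG_DISABLE_FAST  (1 << 2)   /* stands in for HUF_flags_disableFast */

size_t decode_fallback(void* dst, size_t dstSize, const void* src, size_t srcSize);
size_t decode_fast(void* dst, size_t dstSize, const void* src, size_t srcSize, int useAsmLoop);

size_t decode(void* dst, size_t dstSize, const void* src, size_t srcSize, int flags)
{
    int const useAsmLoop = (flags & FLAG_BMI2) && !(flags & FLAG_DISABLE_ASM);
    if (!(flags & FLAG_DISABLE_FAST)) {
        /* The fast path returns 0 when the input does not qualify (wrong layout,
         * too small, ...); any other non-zero value, result or error, propagates. */
        size_t const ret = decode_fast(dst, dstSize, src, srcSize, useAsmLoop);
        if (ret != 0)
            return ret;
    }
    return decode_fallback(dst, dstSize, src, srcSize);
}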
*/ - iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2); + iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags); if (HUF_isError(iSize)) return iSize; /* check result */ @@ -1244,6 +1320,11 @@ HUF_decompress1X2_usingDTable_internal_body( /* decoded size */ return dstSize; } + +/* HUF_decompress4X2_usingDTable_internal_body(): + * Conditions: + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, @@ -1284,8 +1365,9 @@ HUF_decompress4X2_usingDTable_internal_body( DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); @@ -1370,36 +1452,177 @@ size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo } #endif -#if HUF_NEED_DEFAULT_FUNCTION static size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, size_t cSrcSize, HUF_DTable const* DTable) { return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -#endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 -HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; +HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; + +#endif + +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) +{ + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + BYTE* oend[4]; + HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt; + BYTE const* const ilimit = args->ilimit; + + /* Copy the arguments to local registers. */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy(&ip, &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + oend[0] = op[1]; + oend[1] = op[2]; + oend[2] = op[3]; + oend[3] = args->oend; + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + int symbol; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= oend[stream]); + assert(ip[stream] >= ilimit); + } +#endif + /* Compute olimit */ + { + /* Each loop does 5 table lookups for each of the 4 streams. + * Each table lookup consumes up to 11 bits of input, and produces + * up to 2 bytes of output. + */ + /* We can consume up to 7 bytes of input per iteration per stream. + * We also know that each input pointer is >= ip[0]. So we can run + * iters loops before running out of input. + */ + size_t iters = (size_t)(ip[0] - ilimit) / 7; + /* Each iteration can produce up to 10 bytes of output per stream. + * Each output stream my advance at different rates. 
So take the + * minimum number of safe iterations among all the output streams. + */ + for (stream = 0; stream < 4; ++stream) { + size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10; + iters = MIN(iters, oiters); + } + + /* Each iteration produces at least 5 output symbols. So until + * op[3] crosses olimit, we know we haven't executed iters + * iterations yet. This saves us maintaining an iters counter, + * at the expense of computing the remaining # of iterations + * more frequently. + */ + olimit = op[3] + (iters * 5); + + /* Exit the fast decoding loop if we are too close to the end. */ + if (op[3] + 10 > olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. + */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + + do { + /* Do 5 table lookups for each of the first 3 streams */ + for (symbol = 0; symbol < 5; ++symbol) { + for (stream = 0; stream < 3; ++stream) { + int const index = (int)(bits[stream] >> 53); + HUF_DEltX2 const entry = dtable[index]; + MEM_write16(op[stream], entry.sequence); + bits[stream] <<= (entry.nbBits); + op[stream] += (entry.length); + } + } + /* Do 1 table lookup from the final stream */ + { + int const index = (int)(bits[3] >> 53); + HUF_DEltX2 const entry = dtable[index]; + MEM_write16(op[3], entry.sequence); + bits[3] <<= (entry.nbBits); + op[3] += (entry.length); + } + /* Do 4 table lookups from the final stream & reload bitstreams */ + for (stream = 0; stream < 4; ++stream) { + /* Do a table lookup from the final stream. + * This is interleaved with the reloading to reduce register + * pressure. This shouldn't be necessary, but compilers can + * struggle with codegen with high register pressure. + */ + { + int const index = (int)(bits[3] >> 53); + HUF_DEltX2 const entry = dtable[index]; + MEM_write16(op[3], entry.sequence); + bits[3] <<= (entry.nbBits); + op[3] += (entry.length); + } + /* Reload the bistreams. The final bitstream must be reloaded + * after the 5th symbol was decoded. + */ + { + int const ctz = ZSTD_countTrailingZeros64(bits[stream]); + int const nbBits = ctz & 7; + int const nbBytes = ctz >> 3; + ip[stream] -= nbBytes; + bits[stream] = MEM_read64(ip[stream]) | 1; + bits[stream] <<= nbBits; + } + } + } while (op[3] < olimit); + } -static HUF_ASM_X86_64_BMI2_ATTRS size_t -HUF_decompress4X2_usingDTable_internal_bmi2_asm( +_out: + + /* Save the final values of each of the state variables back to args. 
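The X2 loop above differs from the X1 one in that each table entry can hold a pre-paired two-symbol sequence: the store is always 16 bits wide, but the output pointer only advances by entry.length, so a following store overlaps the unused byte. That is also why its olimit math budgets 10 output bytes (5 lookups * at most 2 bytes) per stream per iteration. A standalone sketch of one such lookup (DEltX2 and decodeOneX2 are illustrative names mirroring the HUF_DEltX2 layout; a little-endian target is assumed, as in the fast loop itself):

#include <stdint.h>
#include <string.h>

typedef struct {
    uint16_t sequence;   /* 1 or 2 decoded bytes, packed so the 16-bit store emits them in order */
    uint8_t  nbBits;     /* bits consumed from the container */
    uint8_t  length;     /* output bytes actually produced (1 or 2) */
} DEltX2;

static unsigned char* decodeOneX2(unsigned char* op, uint64_t* bits, const DEltX2* dtable)
{
    unsigned const index = (unsigned)(*bits >> 53);  /* top 11 bits, as in the X1 loop */
    DEltX2 const entry = dtable[index];
    memcpy(op, &entry.sequence, 2);                  /* unconditional 2-byte store */
    *bits <<= entry.nbBits;
    return op + entry.length;                        /* but only advance by 1 or 2 */
}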
*/ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy(&args->ip, &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); +} + + +static HUF_FAST_BMI2_ATTRS size_t +HUF_decompress4X2_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) { + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { void const* dt = DTable + 1; const BYTE* const iend = (const BYTE*)cSrc + 6; BYTE* const oend = (BYTE*)dst + dstSize; - HUF_DecompressAsmArgs args; + HUF_DecompressFastArgs args; { - size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret != 0) - return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (ret == 0) + return 0; } assert(args.ip[0] >= args.ilimit); - HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args); + loopFn(&args); /* note : op4 already verified within main loop */ assert(args.ip[0] >= iend); @@ -1430,91 +1653,72 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm( /* decoded size */ return dstSize; } -#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) + size_t cSrcSize, HUF_DTable const* DTable, int flags) { + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop; + #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2; # if ZSTD_ENABLE_ASM_X86_64_BMI2 - return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -# else - return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } # endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -#else - (void)bmi2; #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -#else - return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } #endif + + if (!(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -size_t HUF_decompress1X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -} - size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, 
cSrcSize, - workSpace, wkspSize); + workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} - - -size_t HUF_decompress4X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); } -static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, - workSpace, wkspSize); + workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - - #endif /* HUF_FORCE_DECOMPRESS_X1 */ @@ -1522,44 +1726,6 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, /* Universal decompression selectors */ /* ***********************************/ -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; @@ -1614,36 +1780,9 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) #endif } - -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, - size_t dstSize, const void* cSrc, - size_t cSrcSize, void* workSpace, - size_t wkspSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize == 0) return ERROR(corruption_detected); - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#else - return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): - HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#endif - } -} - size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1656,71 +1795,71 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, (void)algoNb; assert(algoNb == 0); return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #else return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): + cSrcSize, workSpace, wkspSize, flags): HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #endif } } -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1730,160 +1869,14 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #else - return algoNb ? 
HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : - HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : + HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #endif } } - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX1_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX2_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); - -size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) - static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; -#endif - - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return 
ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); -#else - return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); -#endif - } -} - -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#else - return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : - HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; -#endif - } -} - -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -#endif diff --git a/lib/decompress/huf_decompress_amd64.S b/lib/decompress/huf_decompress_amd64.S index 49589cb6114..671624fe343 100644 --- a/lib/decompress/huf_decompress_amd64.S +++ b/lib/decompress/huf_decompress_amd64.S @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -30,14 +30,14 @@ * TODO: Support Windows calling convention. 
*/ -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) -.global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop -.global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop -.global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop -.global _HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop .text /* Sets up register mappings for clarity. @@ -95,8 +95,9 @@ ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) /* Define both _HUF_* & HUF_* symbols because MacOS * C symbols are prefixed with '_' & Linux symbols aren't. */ -_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: -HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: +_HUF_decompress4X1_usingDTable_internal_fast_asm_loop: +HUF_decompress4X1_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. */ push %rax push %rbx @@ -350,8 +351,9 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: pop %rax ret -_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: -HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: +_HUF_decompress4X2_usingDTable_internal_fast_asm_loop: +HUF_decompress4X2_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. 
*/ push %rax push %rbx @@ -427,41 +429,30 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: /* r15 = (ip0 - ilimit) / 7 */ movq %rdx, %r15 - movabsq $-3689348814741910323, %rdx - movq 8(%rsp), %rax /* rax = oend0 */ - subq %op0, %rax /* rax = oend0 - op0 */ - mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ - - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 + /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ + movq 8(%rsp), %rax /* rax = oend0 */ + subq %op0, %rax /* rax = oend0 - op0 */ + movq 16(%rsp), %rdx /* rdx = oend1 */ + subq %op1, %rdx /* rdx = oend1 - op1 */ - movabsq $-3689348814741910323, %rdx - movq 16(%rsp), %rax /* rax = oend1 */ - subq %op1, %rax /* rax = oend1 - op1 */ - mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ - - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - movabsq $-3689348814741910323, %rdx movq 24(%rsp), %rax /* rax = oend2 */ subq %op2, %rax /* rax = oend2 - op2 */ - mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - movabsq $-3689348814741910323, %rdx movq 32(%rsp), %rax /* rax = oend3 */ subq %op3, %rax /* rax = oend3 - op3 */ + + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ + + movabsq $-3689348814741910323, %rax mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ + shrq $3, %rdx /* rdx = rdx / 10 */ /* r15 = min(%rdx, %r15) */ cmpq %rdx, %r15 diff --git a/lib/decompress/zstd_ddict.c b/lib/decompress/zstd_ddict.c index ce335477b32..ad5c34a7fc0 100644 --- a/lib/decompress/zstd_ddict.c +++ b/lib/decompress/zstd_ddict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,6 @@ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_decompress_internal.h" #include "zstd_ddict.h" @@ -134,7 +133,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, ZSTD_memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; - ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); @@ -240,5 +239,5 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; - return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); + return ddict->dictID; } diff --git a/lib/decompress/zstd_ddict.h b/lib/decompress/zstd_ddict.h index bd03268b508..c4ca8877a07 100644 --- a/lib/decompress/zstd_ddict.h +++ b/lib/decompress/zstd_ddict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c index 0031e98cfb1..d487966ccf1 100644 --- a/lib/decompress/zstd_decompress.c +++ b/lib/decompress/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -59,13 +59,13 @@ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/xxhash.h" /* XXH64_reset, XXH64_update, XXH64_digest, XXH64 */ #include "../common/zstd_internal.h" /* blockProperties_t */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ +#include "../common/bits.h" /* ZSTD_highbit32 */ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) # include "../legacy/zstd_legacy.h" @@ -78,11 +78,11 @@ *************************************/ #define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4 -#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. - * Currently, that means a 0.75 load factor. - * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded - * the load factor of the ddict hash set. - */ +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. + * Currently, that means a 0.75 load factor. + * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded + * the load factor of the ddict hash set. 
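The two DDICT_HASHSET_MAX_LOAD_FACTOR_* constants above encode a 3/4 load-factor cap using only integer arithmetic. One way to read the comparison the comment describes, restated standalone (names and the exact comparison site are illustrative; the table size is assumed non-zero, which its base size of 64 guarantees):

#include <stddef.h>

#define COUNT_MULT 4   /* stand-in for DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT */
#define SIZE_MULT  3   /* stand-in for DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT */

/* Non-zero once count/size reaches SIZE_MULT/COUNT_MULT == 0.75, i.e. once
 * count*4 >= size*3, without ever leaving integer arithmetic. */
static int ddictHashSetIsOverloaded(size_t count, size_t size)
{
    return (count * COUNT_MULT) / (size * SIZE_MULT) != 0;
}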
+ */ #define DDICT_HASHSET_TABLE_BASE_SIZE 64 #define DDICT_HASHSET_RESIZE_FACTOR 2 @@ -243,6 +243,7 @@ static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx) dctx->outBufferMode = ZSTD_bm_buffered; dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum; dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict; + dctx->disableHufAsm = 0; } static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) @@ -438,16 +439,40 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ +** or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); - ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */ - if (srcSize < minInputSize) return minInputSize; - RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter"); + DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize); + if (srcSize > 0) { + /* note : technically could be considered an assert(), since it's an invalid entry */ + RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0"); + } + if (srcSize < minInputSize) { + if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) { + /* when receiving less than @minInputSize bytes, + * control these bytes at least correspond to a supported magic number + * in order to error out early if they don't. + **/ + size_t const toCopy = MIN(4, srcSize); + unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER); + assert(src != NULL); + ZSTD_memcpy(hbuf, src, toCopy); + if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) { + /* not a zstd frame : let's check if it's a skippable frame */ + MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START); + ZSTD_memcpy(hbuf, src, toCopy); + if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) { + RETURN_ERROR(prefix_unknown, + "first bytes don't correspond to any supported magic number"); + } } } + return minInputSize; + } + + ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */ if ( (format != ZSTD_f_zstd1_magicless) && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { @@ -757,10 +782,11 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize ip += 4; } + frameSizeInfo.nbBlocks = nbBlocks; frameSizeInfo.compressedSize = (size_t)(ip - ipstart); frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) ? 
zfh.frameContentSize - : nbBlocks * zfh.blockSizeMax; + : (unsigned long long)nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } @@ -800,6 +826,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) return bound; } +size_t ZSTD_decompressionMargin(void const* src, size_t srcSize) +{ + size_t margin = 0; + unsigned maxBlockSize = 0; + + /* Iterate over each frame */ + while (srcSize > 0) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); + size_t const compressedSize = frameSizeInfo.compressedSize; + unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; + ZSTD_frameHeader zfh; + + FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); + if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) + return ERROR(corruption_detected); + + if (zfh.frameType == ZSTD_frame) { + /* Add the frame header to our margin */ + margin += zfh.headerSize; + /* Add the checksum to our margin */ + margin += zfh.checksumFlag ? 4 : 0; + /* Add 3 bytes per block */ + margin += 3 * frameSizeInfo.nbBlocks; + + /* Compute the max block size */ + maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax); + } else { + assert(zfh.frameType == ZSTD_skippableFrame); + /* Add the entire skippable frame size to our margin. */ + margin += compressedSize; + } + + assert(srcSize >= compressedSize); + src = (const BYTE*)src + compressedSize; + srcSize -= compressedSize; + } + + /* Add the max block size back to the margin. */ + margin += maxBlockSize; + + return margin; +} /*-************************************************************* * Frame decoding @@ -825,7 +893,7 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, if (srcSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } - ZSTD_memcpy(dst, src, srcSize); + ZSTD_memmove(dst, src, srcSize); return srcSize; } @@ -903,6 +971,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, /* Loop on each block */ while (1) { + BYTE* oBlockEnd = oend; size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); @@ -912,16 +981,34 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); + if (ip >= op && ip < oBlockEnd) { + /* We are decompressing in-place. Limit the output pointer so that we + * don't overwrite the block that we are currently reading. This will + * fail decompression if the input & output pointers aren't spaced + * far enough apart. + * + * This is important to set, even when the pointers are far enough + * apart, because ZSTD_decompressBlock_internal() can decide to store + * literals in the output buffer, after the block it is decompressing. + * Since we don't want anything to overwrite our input, we have to tell + * ZSTD_decompressBlock_internal to never write past ip. + * + * See ZSTD_allocateLiteralsBuffer() for reference. + */ + oBlockEnd = op + (ip - op); + } + switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming); break; case bt_raw : + /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. 
*/ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : - decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize); + decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); break; case bt_reserved : default: @@ -956,6 +1043,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, } ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ + DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %zi, consuming %zi bytes of input", op-ostart, ip - (const BYTE*)*srcPtr); *srcPtr = ip; *srcSizePtr = remainingSrcSize; return (size_t)(op-ostart); @@ -1108,8 +1196,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } /** - * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed, - * we allow taking a partial block as the input. Currently only raw uncompressed blocks can + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we + * allow taking a partial block as the input. Currently only raw uncompressed blocks can * be streamed. * * For blocks that can be streamed, this allows us to reduce the latency until we produce @@ -1309,7 +1397,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c default: assert(0); /* impossible */ - RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } @@ -1350,11 +1438,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #else size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, (size_t)(dictEnd - dictPtr), - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #endif RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; @@ -1453,7 +1541,7 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; - dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; dctx->bType = bt_reserved; @@ -1515,7 +1603,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. - * Needed dictionary is a hidden information. + * Needed dictionary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. @@ -1524,7 +1612,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * ZSTD_getFrameHeader(), which will provide a more precise error code. 
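ZSTD_decompressionMargin() above, together with the oBlockEnd clamp and the switch from memcpy to memmove in ZSTD_copyRawBlock(), is what enables overlapping (in-place) decompression. A hedged usage sketch, assuming the margin API is visible to the caller (it is exposed under ZSTD_STATIC_LINKING_ONLY) and the documented contract that the compressed frame sits in the last srcSize bytes of a sufficiently large destination buffer:

```c
#include <assert.h>
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch only: every name except the ZSTD_* calls is illustrative. */
static size_t decompress_in_place(ZSTD_DCtx* dctx,
                                  unsigned char* buf, size_t bufCapacity,
                                  size_t srcSize, size_t contentSize)
{
    unsigned char* const src = buf + bufCapacity - srcSize;  /* frame parked at the tail */
    size_t const margin = ZSTD_decompressionMargin(src, srcSize);
    if (ZSTD_isError(margin)) return margin;
    assert(bufCapacity >= contentSize + margin);  /* caller must size the buffer for in-place use */
    return ZSTD_decompressDCtx(dctx, buf, contentSize, src, srcSize);
}
```

If the input and output end up too close together, the in-place path fails decompression rather than corrupting the not-yet-consumed input, which is exactly what the oBlockEnd limiting above guarantees.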
*/ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { - ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; + ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; @@ -1631,7 +1719,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); - return ZSTD_initDStream_usingDDict(zds, NULL); + FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), ""); + FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), ""); + return ZSTD_startingInputLength(zds->format); } /* ZSTD_initDStream_usingDDict() : @@ -1639,6 +1729,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds) * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { + DEBUGLOG(4, "ZSTD_initDStream_usingDDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); @@ -1649,6 +1740,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { + DEBUGLOG(4, "ZSTD_resetDStream"); FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } @@ -1720,6 +1812,11 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; return bounds; + case ZSTD_d_disableHuffmanAssembly: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + default:; } bounds.error = ERROR(parameter_unsupported); @@ -1760,6 +1857,9 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value case ZSTD_d_refMultipleDDicts: *value = (int)dctx->refMultipleDDicts; return 0; + case ZSTD_d_disableHuffmanAssembly: + *value = (int)dctx->disableHufAsm; + return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); @@ -1793,6 +1893,10 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; + case ZSTD_d_disableHuffmanAssembly: + CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value); + dctx->disableHufAsm = value != 0; + return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); @@ -1980,7 +2084,6 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if (zds->refMultipleDDicts && zds->ddictSet) { ZSTD_DCtx_selectFrameDDict(zds); } - DEBUGLOG(5, "header size : %u", (U32)hSize); if (ZSTD_isError(hSize)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); @@ -2012,6 +2115,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB zds->lhSize += remainingInput; } input->pos = input->size; + /* check first few bytes */ + FORWARD_IF_ERROR( + ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format), + "First few bytes detected incorrect" ); + /* return hint input size */ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); @@ -2029,8 +2137,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, 
ZSTD_outBuffer* output, ZSTD_inB size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") + assert(istart != NULL); ip = istart + cSize; - op += decompressedSize; + op = op ? op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */ zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; @@ -2114,6 +2223,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); + assert(ip != NULL); ip += neededInSize; /* Function modifies the stage so we must break */ break; @@ -2128,7 +2238,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we shouldn't be decompressing a block that we can stream. */ - assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip)); + assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip))); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { @@ -2137,8 +2247,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); } - ip += loadedSize; - zds->inPos += loadedSize; + if (loadedSize != 0) { + /* ip may be NULL */ + ip += loadedSize; + zds->inPos += loadedSize; + } if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ @@ -2148,14 +2261,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB break; } case zdss_flush: - { size_t const toFlushSize = zds->outEnd - zds->outStart; + { + size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize); - op += flushedSize; + + op = op ? 
op + flushedSize : op; + zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; if ( (zds->outBuffSize < zds->fParams.frameContentSize) - && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { + && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); @@ -2169,7 +2285,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB default: assert(0); /* impossible */ - RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } /* result */ @@ -2182,8 +2298,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { - RETURN_ERROR_IF(op==oend, dstSize_tooSmall, ""); - RETURN_ERROR_IF(ip==iend, srcSize_wrong, ""); + RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, ""); + RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, ""); assert(0); } } else { @@ -2220,11 +2336,17 @@ size_t ZSTD_decompressStream_simpleArgs ( void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; - /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c index 2e44d30d2f3..0a06a021e15 100644 --- a/lib/decompress/zstd_decompress_block.c +++ b/lib/decompress/zstd_decompress_block.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -20,12 +20,12 @@ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/zstd_internal.h" #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /*_******************************************************* * Macros @@ -89,7 +89,7 @@ static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; } else { - /* initially this will be stored entirely in dst during huffman decoding, it will partially shifted to litExtraBuffer after */ + /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; } @@ -134,13 +134,16 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, ZSTD_FALLTHROUGH; case set_compressed: - RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); + RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3"); { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); + int const flags = 0 + | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0) + | (dctx->disableHufAsm ? 
HUF_flags_disableAsm : 0); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -165,6 +168,10 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); + if (!singleStream) + RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong, + "Not enough literals (%zu) for the 4-streams mode (min %u)", + litSize, MIN_LITERALS_FOR_4_STREAMS); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); @@ -176,13 +183,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, if (litEncType==set_repeat) { if (singleStream) { - hufSuccess = HUF_decompress1X_usingDTable_bmi2( + hufSuccess = HUF_decompress1X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); + dctx->HUFptr, flags); } else { - hufSuccess = HUF_decompress4X_usingDTable_bmi2( + assert(litSize >= MIN_LITERALS_FOR_4_STREAMS); + hufSuccess = HUF_decompress4X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); + dctx->HUFptr, flags); } } else { if (singleStream) { @@ -190,18 +198,18 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, hufSuccess = HUF_decompress1X_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace)); + sizeof(dctx->workspace), flags); #else - hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( + hufSuccess = HUF_decompress1X1_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); + sizeof(dctx->workspace), flags); #endif } else { - hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( + hufSuccess = HUF_decompress4X_hufOnly_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); + sizeof(dctx->workspace), flags); } } if (dctx->litBufferLocation == ZSTD_split) @@ -237,6 +245,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; case 3: lhSize = 3; + RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize = 3"); litSize = MEM_readLE24(istart) >> 4; break; } @@ -279,12 +288,13 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; case 1: lhSize = 2; + RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3"); litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; + RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4"); litSize = MEM_readLE24(istart) >> 4; - RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); @@ -506,14 +516,15 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, for (i = 8; i < n; i += 8) { MEM_write64(spread + pos + i, sv); } - pos += n; + assert(n>=0); + pos += (size_t)n; } } /* Now we spread those positions across the table. 
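The flags word assembled just above is how the new ZSTD_d_disableHuffmanAssembly setting, introduced earlier in this diff, reaches the Huffman decoder alongside the BMI2 capability bit. From the application side, opting out of the assembly fast path is a single advanced parameter; a hedged sketch, assuming the parameter is visible via ZSTD_STATIC_LINKING_ONLY:

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static ZSTD_DCtx* createPortableDCtx(void)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx != NULL) {
        /* Force the portable C Huffman decoder (e.g. for debugging or auditing). */
        size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_disableHuffmanAssembly, 1);
        if (ZSTD_isError(err)) { /* parameter unsupported by this libzstd build; keep defaults */ }
    }
    return dctx;
}
```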
- * The benefit of doing it in two stages is that we avoid the the + * The benefit of doing it in two stages is that we avoid the * variable size inner loop, which caused lots of branch misses. * Now we can run through all the positions without any branch misses. - * We unroll the loop twice, since that is what emperically worked best. + * We unroll the loop twice, since that is what empirically worked best. */ { size_t position = 0; @@ -540,7 +551,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ + while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */ } } assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ } @@ -551,7 +562,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, for (u=0; ustateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol)); + ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol)); + ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol)); +#else const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; +#endif seq.matchLength = mlDInfo->baseValue; seq.litLength = llDInfo->baseValue; { U32 const ofBase = ofDInfo->baseValue; @@ -1186,9 +1220,13 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) U32 const llnbBits = llDInfo->nbBits; U32 const mlnbBits = mlDInfo->nbBits; U32 const ofnbBits = ofDInfo->nbBits; + + assert(llBits <= MaxLLBits); + assert(mlBits <= MaxMLBits); + assert(ofBits <= MaxOff); /* * As gcc has better branch and block analyzers, sometimes it is only - * valuable to mark likelyness for clang, it gives around 3-4% of + * valuable to mark likeliness for clang, it gives around 3-4% of * performance. */ @@ -1201,13 +1239,16 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) #endif ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. 
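The static asserts and the unconditional extra-bits read above rest on a small bit-budget argument. With the values in current headers (an assumption here: STREAM_ACCUMULATOR_MIN_32 == 25, LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5, MaxMLBits == 16) it works out roughly as:

```c
/* 32-bit long-offset path, per accumulator reload (assumed header values):
 *   guaranteed bits after a reload       >= 25   (STREAM_ACCUMULATOR_MIN_32)
 *   offset extra bits, always consumed    =  5   (LONG_OFFSETS_MAX_EXTRA_BITS_32)
 *   bits left for the match-length code  >= 20   >= MaxMLBits (16)
 * so reading a fixed 5 extra bits never starves the reads that follow,
 * and the old branch on `extraBits != 0` can be dropped entirely. */
```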
+ */ + U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32; offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + offset += BIT_readBitsFast(&seqState->DStream, extraBits); } else { offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); @@ -1552,7 +1593,7 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart); const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); - DEBUGLOG(5, "ZSTD_decompressSequences_body"); + DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq); (void)frame; /* Regen sequences */ @@ -1945,34 +1986,79 @@ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ +/** + * @returns The total size of the history referencable by zstd, including + * both the prefix and the extDict. At @p op any offset larger than this + * is invalid. + */ +static size_t ZSTD_totalHistorySize(BYTE* op, BYTE const* virtualStart) +{ + return (size_t)(op - virtualStart); +} -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) -/* ZSTD_getLongOffsetsShare() : +typedef struct { + unsigned longOffsetShare; + unsigned maxNbAdditionalBits; +} ZSTD_OffsetInfo; + +/* ZSTD_getOffsetInfo() : * condition : offTable must be valid * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) - * compared to maximum possible of (1< 22) total += 1; + ZSTD_OffsetInfo info = {0, 0}; + /* If nbSeq == 0, then the offTable is uninitialized, but we have + * no sequences, so both values should be 0. + */ + if (nbSeq != 0) { + const void* ptr = offTable; + U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; + const ZSTD_seqSymbol* table = offTable + 1; + U32 const max = 1 << tableLog; + U32 u; + DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog); + + assert(max <= (1 << OffFSELog)); /* max not too large */ + for (u=0; u 22) info.longOffsetShare += 1; + } + + assert(tableLog <= OffFSELog); + info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */ } - assert(tableLog <= OffFSELog); - total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + return info; +} - return total; +/** + * @returns The maximum offset we can decode in one read of our bitstream, without + * reloading more bits in the middle of the offset bits read. Any offsets larger + * than this must use the long offset decoder. + */ +static size_t ZSTD_maxShortOffset(void) +{ + if (MEM_64bits()) { + /* We can decode any offset without reloading bits. + * This might change if the max window size grows. + */ + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + return (size_t)-1; + } else { + /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. + * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. + * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. 
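To put numbers on ZSTD_maxShortOffset() for the 32-bit case (again assuming STREAM_ACCUMULATOR_MIN == 25 there and ZSTD_REP_NUM == 3):

```c
size_t const maxOffbase = ((size_t)1 << (25 + 1)) - 1;  /* 67,108,863 */
size_t const maxOffset  = maxOffbase - 3;                /* 67,108,860, just under 64 MiB */
/* Offsets above this bound cannot be read in a single pass of the 32-bit
 * bitstream and must take the long-offset decoder instead. */
```

On 64-bit builds the function simply returns (size_t)-1, since ZSTD_WINDOWLOG_MAX <= 31 keeps every legal offset readable without a reload.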
+ */ + size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1; + size_t const maxOffset = maxOffbase - ZSTD_REP_NUM; + assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN); + return maxOffset; + } } -#endif size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, @@ -1980,20 +2066,21 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, const void* src, size_t srcSize, const int frame, const streaming_operation streaming) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; - /* isLongOffset must be true if there are long offsets. - * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. - * We don't expect that to be the case in 64-bit mode. - * In block mode, window size is not known, so we have to be conservative. - * (note: but it could be evaluated from current-lowLimit) - */ - ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); - RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); + /* Note : the wording of the specification + * allows compressed block to be sized exactly ZSTD_BLOCKSIZE_MAX. + * This generally does not happen, as it makes little sense, + * since an uncompressed block would feature same size and have no decompression cost. + * Also, note that decoder from reference libzstd before < v1.5.4 + * would consider this edge case as an error. + * As a consequence, avoid generating compressed blocks of size ZSTD_BLOCKSIZE_MAX + * for broader compatibility with the deployed ecosystem of zstd decoders */ + RETURN_ERROR_IF(srcSize > ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); /* Decode literals section */ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); - DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; @@ -2001,6 +2088,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, /* Build Decoding Tables */ { + /* Compute the maximum block size, which must also work when !frame and fParams are unset. + * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. + */ + size_t const blockSizeMax = MIN(dstCapacity, (frame ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX)); + size_t const totalHistorySize = ZSTD_totalHistorySize((BYTE*)dst + blockSizeMax, (BYTE const*)dctx->virtualStart); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than ZSTD_maxShortOffset(). + * We don't expect that to be the case in 64-bit mode. + * + * We check here to see if our history is large enough to allow long offsets. + * If it isn't, then we can't possible have (valid) long offsets. If the offset + * is invalid, then it is okay to read it incorrectly. + * + * If isLongOffsets is true, then we will later check our decoding table to see + * if it is even possible to generate long offsets. + */ + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset())); /* These macros control at build-time which decompressor implementation * we use. If neither is defined, we do some inspection and dispatch at * runtime. 
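Condensing the comment above into the shape of the new decision (a paraphrase of this hunk, not additional behavior):

```c
/* Paraphrase of the hunk above: */
size_t const blockSizeMax     = MIN(dstCapacity,
                                    frame ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX);
size_t const totalHistorySize = ZSTD_totalHistorySize((BYTE*)dst + blockSizeMax,
                                                      (BYTE const*)dctx->virtualStart);
ZSTD_longOffset_e isLongOffset =
    (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset()));
```

The key change from the old heuristic is that the referencable history, not the frame's declared window size, drives the decision, and (as the following hunk shows) the offset table can later veto long offsets when its maximum extra-bits count makes them impossible.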
@@ -2008,6 +2112,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; +#else + /* Set to 1 to avoid computing offset info if we don't need to. + * Otherwise this value is ignored. + */ + int usePrefetchDecoder = 1; #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); @@ -2017,26 +2126,38 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if ( !usePrefetchDecoder - && (!frame || (dctx->fParams.windowSize > (1<<24))) - && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ - U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); - U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ - usePrefetchDecoder = (shareLongOffsets >= minShare); + /* If we could potentially have long offsets, or we might want to use the prefetch decoder, + * compute information about the share of long offsets, and the maximum nbAdditionalBits. + * NOTE: could probably use a larger nbSeq limit + */ + if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) { + ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); + if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) { + /* If isLongOffset, but the maximum number of additional bits that we see in our table is small + * enough, then we know it is impossible to have too long an offset in this block, so we can + * use the regular offset decoder. + */ + isLongOffset = ZSTD_lo_isRegularOffset; + } + if (!usePrefetchDecoder) { + U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ + usePrefetchDecoder = (info.longOffsetShare >= minShare); + } } -#endif dctx->ddictIsCold = 0; #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if (usePrefetchDecoder) + if (usePrefetchDecoder) { +#else + (void)usePrefetchDecoder; + { #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif + } #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ diff --git a/lib/decompress/zstd_decompress_block.h b/lib/decompress/zstd_decompress_block.h index c61a9d0c4b3..67791dbc3ad 100644 --- a/lib/decompress/zstd_decompress_block.h +++ b/lib/decompress/zstd_decompress_block.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/decompress/zstd_decompress_internal.h b/lib/decompress/zstd_decompress_internal.h index 2b5a53850ac..c2ec5d9fbef 100644 --- a/lib/decompress/zstd_decompress_internal.h +++ b/lib/decompress/zstd_decompress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -75,12 +75,13 @@ static UNUSED_ATTR const U32 ML_base[MaxML+1] = { #define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64)) #define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32)) +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 typedef struct { ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ - HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + HUF_DTable hufTable[HUF_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUF_decompress4X */ U32 rep[ZSTD_REP_NUM]; U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32]; } ZSTD_entropyDTables_t; @@ -164,6 +165,7 @@ struct ZSTD_DCtx_s ZSTD_dictUses_e dictUses; ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */ + int disableHufAsm; /* streaming */ ZSTD_dStreamStage streamStage; diff --git a/lib/deprecated/zbuff.h b/lib/deprecated/zbuff.h index b83ea0fed58..a968245b36a 100644 --- a/lib/deprecated/zbuff.h +++ b/lib/deprecated/zbuff.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/deprecated/zbuff_common.c b/lib/deprecated/zbuff_common.c index e7d01a08180..5a2f2db354f 100644 --- a/lib/deprecated/zbuff_common.c +++ b/lib/deprecated/zbuff_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/deprecated/zbuff_compress.c b/lib/deprecated/zbuff_compress.c index 51cf158c4ad..1d8682150b2 100644 --- a/lib/deprecated/zbuff_compress.c +++ b/lib/deprecated/zbuff_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/deprecated/zbuff_decompress.c b/lib/deprecated/zbuff_decompress.c index d73c0f35fac..12a66af7412 100644 --- a/lib/deprecated/zbuff_decompress.c +++ b/lib/deprecated/zbuff_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -13,6 +13,8 @@ /* ************************************* * Dependencies ***************************************/ +#define ZSTD_DISABLE_DEPRECATE_WARNINGS /* suppress warning on ZSTD_initDStream_usingDict */ +#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c index 028802a1b00..9e5e7d5b55d 100644 --- a/lib/dictBuilder/cover.c +++ b/lib/dictBuilder/cover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -34,6 +34,7 @@ #include "../common/pool.h" #include "../common/threading.h" #include "../common/zstd_internal.h" /* includes zstd.h */ +#include "../common/bits.h" /* ZSTD_highbit32 */ #include "../zdict.h" #include "cover.h" @@ -541,7 +542,7 @@ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { /** * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can used multiple + * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `COVER_ctx_destroy()`. @@ -646,7 +647,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) { - const double ratio = (double)nbDmers / maxDictSize; + const double ratio = (double)nbDmers / (double)maxDictSize; if (ratio >= 10) { return; } @@ -950,9 +951,17 @@ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, } } +static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz) +{ + COVER_dictSelection_t ds; + ds.dictContent = buf; + ds.dictSize = s; + ds.totalCompressedSize = csz; + return ds; +} + COVER_dictSelection_t COVER_dictSelectionError(size_t error) { - COVER_dictSelection_t selection = { NULL, 0, error }; - return selection; + return setDictSelection(NULL, 0, error); } unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { @@ -1005,9 +1014,8 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe } if (params.shrinkDict == 0) { - COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); - return selection; + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } largestDict = dictContentSize; @@ -1039,20 +1047,16 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe return COVER_dictSelectionError(totalCompressedSize); } - if (totalCompressedSize <= largestCompressed * regressionTolerance) { - COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize }; + if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) { free(largestDictbuffer); - return selection; + return setDictSelection( candidateDictBuffer, dictContentSize, totalCompressedSize ); } dictContentSize *= 2; } dictContentSize = largestDict; totalCompressedSize = largestCompressed; - { - COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; - free(candidateDictBuffer); - return selection; - } + free(candidateDictBuffer); + return 
setDictSelection( largestDictbuffer, dictContentSize, totalCompressedSize ); } /** diff --git a/lib/dictBuilder/cover.h b/lib/dictBuilder/cover.h index 1aacdddd6fe..252624bdeb5 100644 --- a/lib/dictBuilder/cover.h +++ b/lib/dictBuilder/cover.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/dictBuilder/fastcover.c b/lib/dictBuilder/fastcover.c index 3352859ada4..46bba0120b0 100644 --- a/lib/dictBuilder/fastcover.c +++ b/lib/dictBuilder/fastcover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -304,7 +304,7 @@ FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx) /** * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can used multiple + * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `FASTCOVER_ctx_destroy()`. diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c index 006aba7c9c4..f22e04df2e6 100644 --- a/lib/dictBuilder/zdict.c +++ b/lib/dictBuilder/zdict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -44,7 +44,6 @@ #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY #endif -#define HUF_STATIC_LINKING_ONLY #include "../common/mem.h" /* read */ #include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */ @@ -54,6 +53,7 @@ #include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */ #include "../zdict.h" #include "divsufsort.h" +#include "../common/bits.h" /* ZSTD_NbCommonBytes */ /*-************************************* @@ -130,85 +130,6 @@ size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize) /*-******************************************************** * Dictionary training functions **********************************************************/ -static unsigned ZDICT_NbCommonBytes (size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, (U64)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 
2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - if (val != 0) { - unsigned long r; - _BitScanReverse64(&r, val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, (unsigned long)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} - - /*! ZDICT_count() : Count the nb of common bytes between 2 pointers. Note : this function presumes end of buffer followed by noisy guard band. @@ -223,7 +144,7 @@ static size_t ZDICT_count(const void* pIn, const void* pMatch) pMatch = (const char*)pMatch+sizeof(size_t); continue; } - pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); + pIn = (const char*)pIn+ZSTD_NbCommonBytes(diff); return (size_t)((const char*)pIn - pStart); } } @@ -451,7 +372,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const elt = table[u]; /* sort : improve rank */ while ((u>1) && (table[u-1].savings < elt.savings)) - table[u] = table[u-1], u--; + table[u] = table[u-1], u--; table[u] = elt; return u; } } @@ -602,7 +523,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, if (solution.length==0) { cursor++; continue; } ZDICT_insertDictItem(dictList, dictListSize, solution, buffer); cursor += solution.length; - DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100); + DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0); } } _cleanup: @@ -754,6 +675,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); BYTE* dstPtr = (BYTE*)dstBuffer; + U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32]; /* init */ DEBUGLOG(4, "ZDICT_analyzeEntropy"); @@ -794,7 +716,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, } } /* analyze, build stats, starting with literals */ - { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + { size_t maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(maxNbBits)) { eSize = maxNbBits; DISPLAYLEVEL(1, " HUF_buildCTable error \n"); @@ -803,7 +725,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */ DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n"); ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */ - maxNbBits = HUF_buildCTable 
(hufTable, countLit, 255, huffLog); + maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); assert(maxNbBits==9); } huffLog = (U32)maxNbBits; @@ -844,7 +766,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, llLog = (U32)errorCode; /* write result to buffer */ - { size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog); + { size_t const hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(hhSize)) { eSize = hhSize; DISPLAYLEVEL(1, "HUF_writeCTable error \n"); diff --git a/lib/dll/example/Makefile b/lib/dll/example/Makefile index 03b034dd50a..86cf6906e5c 100644 --- a/lib/dll/example/Makefile +++ b/lib/dll/example/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/lib/dll/example/README.md b/lib/dll/example/README.md index 9e30fd59331..46aec798005 100644 --- a/lib/dll/example/README.md +++ b/lib/dll/example/README.md @@ -46,7 +46,7 @@ The compiled executable will require ZSTD DLL which is available at `dll\libzstd Open `example\fullbench-dll.sln` to compile `fullbench-dll` that uses a dynamic ZSTD library from the `dll` directory. The solution works with Visual C++ 2010 or newer. When one will open the solution with Visual C++ newer than 2010 -then the solution will upgraded to the current version. +then the solution will be upgraded to the current version. ## Using ZSTD DLL with Visual C++ diff --git a/lib/legacy/zstd_legacy.h b/lib/legacy/zstd_legacy.h index a6f1174b82e..dd173251d34 100644 --- a/lib/legacy/zstd_legacy.h +++ b/lib/legacy/zstd_legacy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -242,6 +242,13 @@ MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size frameSizeInfo.compressedSize = ERROR(srcSize_wrong); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; } + /* In all cases, decompressedBound == nbBlocks * ZSTD_BLOCKSIZE_MAX. + * So we can compute nbBlocks without having to change every function. + */ + if (frameSizeInfo.decompressedBound != ZSTD_CONTENTSIZE_ERROR) { + assert((frameSizeInfo.decompressedBound & (ZSTD_BLOCKSIZE_MAX - 1)) == 0); + frameSizeInfo.nbBlocks = (size_t)(frameSizeInfo.decompressedBound / ZSTD_BLOCKSIZE_MAX); + } return frameSizeInfo; } diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c index 23caaef5647..1a3aad07ed8 100644 --- a/lib/legacy/zstd_v01.c +++ b/lib/legacy/zstd_v01.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -190,25 +190,6 @@ typedef signed long long S64; /**************************************************************** * Memory I/O *****************************************************************/ -/* FSE_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. 
- * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define FSE_FORCE_MEMORY_ACCESS 1 -# endif -#endif - static unsigned FSE_32bits(void) { @@ -221,24 +202,6 @@ static unsigned FSE_isLittleEndian(void) return one.c[0]; } -#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) - -static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } -static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } -static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -#else - static U16 FSE_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -254,8 +217,6 @@ static U64 FSE_read64(const void* memPtr) U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } -#endif /* FSE_FORCE_MEMORY_ACCESS */ - static U16 FSE_readLE16(const void* memPtr) { if (FSE_isLittleEndian()) @@ -1190,7 +1151,7 @@ static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, si zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1759,20 +1720,26 @@ static size_t ZSTD_execSequence(BYTE* op, static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; + BYTE* const oLitEnd = op + sequence.litLength; const size_t litLength = sequence.litLength; BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ const BYTE* const litEnd = *litPtr + litLength; - /* check */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); - if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ + if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ /* copy Literals */ - if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) - memmove(op, *litPtr, litLength); /* overwrite risk */ - else - ZSTD_wildcopy(op, *litPtr, litLength); + ZSTD_memmove(op, *litPtr, sequence.litLength); /* note : v0.1 seems to allow scenarios where output or input are close to end of buffer */ + op += litLength; *litPtr = litEnd; /* update for next sequence */ diff --git a/lib/legacy/zstd_v01.h b/lib/legacy/zstd_v01.h index f777eb6e4c9..6ac876954d1 100644 --- a/lib/legacy/zstd_v01.h +++ b/lib/legacy/zstd_v01.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c index 2f473a75734..e09bb4a248c 100644 --- a/lib/legacy/zstd_v02.c +++ b/lib/legacy/zstd_v02.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -28,7 +28,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -115,24 +115,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -143,33 +125,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -190,9 +145,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -269,7 +221,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -433,7 +385,7 @@ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -453,7 +405,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -510,7 +462,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -609,7 +561,7 @@ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be mor header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -753,7 +705,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -822,7 +774,7 @@ static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, si Header File Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -882,7 +834,7 @@ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. 
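With MEM_FORCE_MEMORY_ACCESS removed, the legacy readers keep only the memcpy idiom retained in MEM_read16/32 above. A self-contained sketch of that idiom follows; the names are illustrative, not the library's.

#include <stdint.h>
#include <string.h>

/* Safe unaligned load: memcpy carries no alignment requirement and no
 * strict-aliasing hazard; modern compilers typically lower it to a single
 * (unaligned) load on targets that allow one. */
static uint32_t read32_unaligned(const void* p)
{
    uint32_t v;
    memcpy(&v, p, sizeof v);
    return v;
}

static void write16_unaligned(void* p, uint16_t v)
{
    memcpy(p, &v, sizeof v);
}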
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -946,7 +898,7 @@ typedef struct ZSTD_DCtx_s ZSTD_DCtx; FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1450,7 +1402,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2609,7 +2561,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3114,12 +3066,19 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use the pointer check */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ diff --git a/lib/legacy/zstd_v02.h b/lib/legacy/zstd_v02.h index 1b371953b74..dab0260ee9e 100644 --- a/lib/legacy/zstd_v02.h +++ b/lib/legacy/zstd_v02.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c index 6625f4df1cb..b0d7f521ed0 100644 --- a/lib/legacy/zstd_v03.c +++ b/lib/legacy/zstd_v03.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. 
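The v0.2 hunk above also adds an explicit window check: sequence.offset may not reach before base, the start of decoded history. A small sketch of that validation, under the assumption that base and oLitEnd bracket the already-produced output:

/* Sketch (hypothetical names): a back-reference is decodable only if its
 * source oLitEnd - offset does not fall before the window start `base`. */
static int offset_in_window(const unsigned char* oLitEnd,
                            const unsigned char* base,
                            size_t offset)
{
    return offset <= (size_t)(oLitEnd - base);  /* 0 means corrupted input */
}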
and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -29,7 +29,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -116,24 +116,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -144,33 +126,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -191,10 +146,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -271,7 +222,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -435,7 +386,7 @@ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -455,7 +406,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -512,7 +463,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -611,7 +562,7 @@ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be mor header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -755,7 +706,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -823,7 +774,7 @@ static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, si Header File Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -883,7 +834,7 @@ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -947,7 +898,7 @@ typedef struct ZSTD_DCtx_s ZSTD_DCtx; FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1451,7 +1402,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2248,7 +2199,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2755,18 +2706,24 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ - { - const BYTE* match = op - sequence.offset; + { const BYTE* match = op - sequence.offset; /* check */ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */ diff --git a/lib/legacy/zstd_v03.h b/lib/legacy/zstd_v03.h index 7a00d4304ba..9bf3cce6473 100644 --- a/lib/legacy/zstd_v03.h +++ b/lib/legacy/zstd_v03.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
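For the "copy Match" block touched above: when offset is smaller than matchLength the match source overlaps its destination, and the only universally correct expansion is byte by byte. The sketch below shows that conceptual fallback; the real decoders use faster wildcopy paths plus the dec32table/dec64table adjustments instead.

/* Sketch: expand an LZ match whose source may overlap its destination
 * (offset < matchLength). Each written byte may itself become a source. */
static void copy_match_overlapping(unsigned char* op, size_t offset, size_t matchLength)
{
    const unsigned char* match = op - offset;
    while (matchLength--) *op++ = *match++;
}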
* * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c index 8d305c7eae9..57be832bd32 100644 --- a/lib/legacy/zstd_v04.c +++ b/lib/legacy/zstd_v04.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -87,24 +87,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -115,33 +97,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -162,9 +117,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -542,7 +494,7 @@ If there is an error, the function will return an error code, which can be teste header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -700,7 +652,7 @@ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -720,7 +672,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -781,7 +733,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -930,7 +882,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1436,7 +1388,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1514,7 +1466,7 @@ static unsigned HUF_isError(size_t code); /* tells if a return value i header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1601,7 +1553,7 @@ static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. 
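The repeated comment fix on the *Fast bit readers ("only works if nbBits >= 1") reflects the masked-shift trick they rely on. A sketch of the idea, assuming a 64-bit container, shows why nbBits == 0 is not supported:

#include <stdint.h>

/* Sketch of a masked-shift lookahead in the spirit of BIT_lookBitsFast.
 * For nbBits == 0 the right-shift amount wraps to 0 instead of 64 (a shift
 * by the full width would be undefined in C anyway), so the result is not
 * the expected 0, hence the nbBits >= 1 precondition. */
static uint64_t look_bits_fast(uint64_t bitContainer, unsigned bitsConsumed, unsigned nbBits)
{
    unsigned const bitMask = 63;  /* sizeof(container)*8 - 1 */
    return (bitContainer << (bitsConsumed & bitMask))
           >> ((64 - nbBits) & bitMask);
}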
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2401,7 +2353,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - decompression module fo v0.4 legacy format Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2876,13 +2828,19 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3283,7 +3241,7 @@ static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/lib/legacy/zstd_v04.h b/lib/legacy/zstd_v04.h index 66b97ab8e60..640240d624d 100644 --- a/lib/legacy/zstd_v04.h +++ b/lib/legacy/zstd_v04.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c index 795dfb410c0..93a1169f3b6 100644 --- a/lib/legacy/zstd_v05.c +++ b/lib/legacy/zstd_v05.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -106,24 +106,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -134,37 +116,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -195,9 +146,6 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -262,7 +210,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -286,7 +234,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTD_STATIC_H #define ZSTD_STATIC_H @@ -398,7 +346,7 @@ size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -485,7 +433,7 @@ static const size_t ZSTDv05_frameHeaderSize_min = 5; #define FSEv05_ENCODING_DYNAMIC 3 -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ @@ -553,7 +501,7 @@ typedef struct { header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -672,7 +620,7 @@ size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -826,7 +774,7 @@ MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits) } /*! 
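The retained MEM_readLE16 path above follows the usual pattern: read natively on little-endian hosts, otherwise compose the value from bytes. A self-contained sketch of that dispatch, with illustrative names:

#include <stdint.h>
#include <string.h>

static int is_little_endian(void)
{
    const union { uint32_t u; uint8_t c[4]; } one = { 1 };
    return one.c[0];
}

/* Read a 16-bit little-endian value regardless of host endianness. */
static uint16_t read_le16(const void* p)
{
    if (is_little_endian()) {
        uint16_t v;
        memcpy(&v, p, sizeof v);  /* same memcpy idiom as MEM_read16 */
        return v;
    } else {
        const uint8_t* b = (const uint8_t*)p;
        return (uint16_t)(b[0] | (b[1] << 8));
    }
}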
BITv05_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -846,7 +794,7 @@ MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits) } /*!BITv05_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits) { size_t value = BITv05_lookBitsFast(bitD, nbBits); @@ -901,7 +849,7 @@ MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1051,7 +999,7 @@ MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr) FSEv05 : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1537,7 +1485,7 @@ size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1610,7 +1558,7 @@ const char* HUFv05_getErrorName(size_t code); /* provides error code string (u header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1702,7 +1650,7 @@ size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2547,7 +2495,7 @@ size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2645,7 +2593,7 @@ struct ZSTDv05_DCtx_s FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)]; FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)]; FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)]; - unsigned hufTableX4[HUFv05_DTABLE_SIZE(HufLog)]; + unsigned hufTableX4[HUFv05_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; const void* previousDstEnd; const void* base; const void* vBase; @@ -2673,7 +2621,7 @@ size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx) dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTableX4[0] = HufLog; + dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; dctx->flagStaticTables = 0; return 0; } @@ -3234,13 +3182,19 @@ static size_t ZSTDv05_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv05_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3746,7 +3700,7 @@ size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/lib/legacy/zstd_v05.h b/lib/legacy/zstd_v05.h index bd423bfc1b9..2dcffc92367 100644 --- a/lib/legacy/zstd_v05.h +++ b/lib/legacy/zstd_v05.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c index ead213c4849..175f7cc4224 100644 --- a/lib/legacy/zstd_v06.c +++ b/lib/legacy/zstd_v06.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -23,7 +23,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -108,24 +108,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -136,33 +118,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -183,9 +138,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -281,7 +233,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -305,7 +257,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTDv06_STATIC_H #define ZSTDv06_STATIC_H @@ -412,7 +364,7 @@ ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, siz Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -479,7 +431,7 @@ typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 #define IS_HUF 0 #define IS_PCH 1 @@ -619,7 +571,7 @@ void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -767,7 +719,7 @@ If there is an error, the function will return an error code, which can be teste header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -928,7 +880,7 @@ MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuff } /*! BITv06_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -948,7 +900,7 @@ MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits) } /*! 
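MEM_swap32, visible in the v0.6 context above, dispatches to compiler intrinsics (for example _MSC_VER's byte-swap or GCC's builtin) when available. A portable fallback looks roughly like this sketch:

#include <stdint.h>

/* Portable 32-bit byte swap; library versions prefer the compiler's
 * byte-swap intrinsic when one is provided. */
static uint32_t swap32_portable(uint32_t in)
{
    return ((in << 24) & 0xff000000u)
         | ((in <<  8) & 0x00ff0000u)
         | ((in >>  8) & 0x0000ff00u)
         | ((in >> 24) & 0x000000ffu);
}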
BITv06_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits) { size_t const value = BITv06_lookBitsFast(bitD, nbBits); @@ -1002,7 +954,7 @@ MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1210,7 +1162,7 @@ MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStre Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1355,7 +1307,7 @@ size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1679,7 +1631,7 @@ size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1749,7 +1701,7 @@ size_t HUFv06_compressBound(size_t size); /**< maximum compressed size */ header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1931,7 +1883,7 @@ MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankSta Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2676,7 +2628,7 @@ size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2700,7 +2652,7 @@ size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2730,7 +2682,7 @@ const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(er zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2754,7 +2706,7 @@ const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(er OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -2806,7 +2758,7 @@ struct ZSTDv06_DCtx_s FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)]; FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)]; FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)]; - unsigned hufTableX4[HUFv06_DTABLE_SIZE(HufLog)]; + unsigned hufTableX4[HUFv06_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; const void* previousDstEnd; const void* base; const void* vBase; @@ -2834,7 +2786,7 @@ size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx) dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTableX4[0] = HufLog; + dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; dctx->flagRepeatTable = 0; return 0; } @@ -3370,13 +3322,19 @@ static size_t ZSTDv06_execSequence(BYTE* op, const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv06_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3889,7 +3847,7 @@ size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, s Buffered version of Zstd compression library 
Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3913,7 +3871,7 @@ size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, s OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -4035,7 +3993,8 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd, size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (ZSTDv06_isError(hSize)) return hSize; if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/lib/legacy/zstd_v06.h b/lib/legacy/zstd_v06.h index 9e32b76e08d..633891010d7 100644 --- a/lib/legacy/zstd_v06.h +++ b/lib/legacy/zstd_v06.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c index 3a0418e526f..15dc3ef7994 100644 --- a/lib/legacy/zstd_v07.c +++ b/lib/legacy/zstd_v07.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -184,7 +184,7 @@ ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockS low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -268,24 +268,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
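The ZBUFFv06_decompressContinue hunk above guards the header-buffer memcpy with `if (ip != NULL)`: passing a null pointer to memcpy is undefined behavior even when the length is zero, which is what sanitizers flag. A sketch of the defensive pattern:

#include <string.h>

/* memcpy requires valid pointers even for n == 0, so skip the call
 * entirely when there is nothing to copy or the source is null. */
static void copy_if_any(void* dst, const void* src, size_t n)
{
    if (n > 0 && src != NULL)
        memcpy(dst, src, n);
}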
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -296,33 +278,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -343,8 +298,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -439,7 +392,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -596,7 +549,7 @@ MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuff } /*! BITv07_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -616,7 +569,7 @@ MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits) } /*! BITv07_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits) { size_t const value = BITv07_lookBitsFast(bitD, nbBits); @@ -670,7 +623,7 @@ MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream) Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -978,7 +931,7 @@ MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStre header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1151,7 +1104,7 @@ size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1375,7 +1328,7 @@ size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1699,7 +1652,7 @@ size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2577,7 +2530,7 @@ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2601,7 +2554,7 @@ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2647,7 +2600,7 @@ static void ZSTDv07_defaultFreeFunction(void* opaque, void* address) Header File for include Copyright (C) 2014-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2717,7 +2670,7 @@ typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t; #define LONGNBSEQ 0x7F00 @@ -2854,7 +2807,7 @@ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2878,7 +2831,7 @@ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -2931,7 +2884,7 @@ struct ZSTDv07_DCtx_s FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)]; FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)]; FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)]; - HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)]; /* can accommodate HUFv07_decompress4X */ + HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUFv07_decompress4X */ const void* previousDstEnd; const void* base; const void* vBase; @@ -2967,7 +2920,7 @@ size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx) dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001); + dctx->hufTable[0] = (HUFv07_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; } @@ -3599,11 +3552,14 @@ size_t ZSTDv07_execSequence(BYTE* op, const BYTE* match = oLitEnd - sequence.offset; /* check */ - if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + assert(oend >= op); + if (sequence.litLength + WILDCOPY_OVERLENGTH > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequenceLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + assert(litLimit >= *litPtr); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected);; /* copy Literals */ - ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + ZSTDv07_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3617,7 +3573,7 @@ size_t ZSTDv07_execSequence(BYTE* op, 
return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = (size_t)(dictEnd - match); memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -4253,7 +4209,7 @@ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -4277,7 +4233,7 @@ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -4417,7 +4373,8 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd, if (hSize != 0) { size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/lib/legacy/zstd_v07.h b/lib/legacy/zstd_v07.h index bc35cfa6a33..1ff39041f88 100644 --- a/lib/legacy/zstd_v07.h +++ b/lib/legacy/zstd_v07.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/libzstd.mk b/lib/libzstd.mk index 6e9a643954b..5e11d5d294e 100644 --- a/lib/libzstd.mk +++ b/lib/libzstd.mk @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -15,17 +15,34 @@ # Zstd lib directory LIBZSTD ?= ./ +# ZSTD_LIB_MINIFY is a helper variable that +# configures a bunch of other variables to space-optimized defaults. 
+ZSTD_LIB_MINIFY ?= 0 + # Legacy support -ZSTD_LEGACY_SUPPORT ?= 5 +ifneq ($(ZSTD_LIB_MINIFY), 0) + ZSTD_LEGACY_SUPPORT ?= 0 +else + ZSTD_LEGACY_SUPPORT ?= 5 +endif ZSTD_LEGACY_MULTITHREADED_API ?= 0 # Build size optimizations -HUF_FORCE_DECOMPRESS_X1 ?= 0 -HUF_FORCE_DECOMPRESS_X2 ?= 0 -ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 -ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 -ZSTD_NO_INLINE ?= 0 -ZSTD_STRIP_ERROR_STRINGS ?= 0 +ifneq ($(ZSTD_LIB_MINIFY), 0) + HUF_FORCE_DECOMPRESS_X1 ?= 1 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 1 + ZSTD_STRIP_ERROR_STRINGS ?= 1 +else + HUF_FORCE_DECOMPRESS_X1 ?= 0 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 0 + ZSTD_STRIP_ERROR_STRINGS ?= 0 +endif # Assembly support ZSTD_NO_ASM ?= 0 @@ -61,17 +78,8 @@ LIBVER := $(shell echo $(LIBVER_SCRIPT)) CCVER := $(shell $(CC) --version) ZSTD_VERSION?= $(LIBVER) -# ZSTD_LIB_MINIFY is a helper variable that -# configures a bunch of other variables to space-optimized defaults. -ZSTD_LIB_MINIFY ?= 0 ifneq ($(ZSTD_LIB_MINIFY), 0) HAVE_CC_OZ ?= $(shell echo "" | $(CC) -Oz -x c -c - -o /dev/null 2> /dev/null && echo 1 || echo 0) - ZSTD_LEGACY_SUPPORT ?= 0 - ZSTD_LIB_DEPRECATED ?= 0 - HUF_FORCE_DECOMPRESS_X1 ?= 1 - ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 - ZSTD_NO_INLINE ?= 1 - ZSTD_STRIP_ERROR_STRINGS ?= 1 ifneq ($(HAVE_CC_OZ), 0) # Some compilers (clang) support an even more space-optimized setting. CFLAGS += -Oz @@ -114,13 +122,16 @@ CFLAGS += -Qunused-arguments -Wa,--noexecstack endif endif +ifeq ($(shell echo "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }" | $(CC) $(FLAGS) -z cet-report=error -x c -Werror - -o $(VOID) 2>$(VOID) && echo 1 || echo 0),1) +LDFLAGS += -z cet-report=error +endif + HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) GREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) GREP_OPTIONS += --color=never endif GREP = grep $(GREP_OPTIONS) -SED_ERE_OPT ?= -E ZSTD_COMMON_FILES := $(sort $(wildcard $(LIBZSTD)/common/*.c)) ZSTD_COMPRESS_FILES := $(sort $(wildcard $(LIBZSTD)/compress/*.c)) diff --git a/lib/libzstd.pc.in b/lib/libzstd.pc.in index 43ebaec3576..d5cc0270cea 100644 --- a/lib/libzstd.pc.in +++ b/lib/libzstd.pc.in @@ -1,6 +1,6 @@ # ZSTD - standard compression algorithm -# Copyright (C) 2014-2016, Yann Collet, Facebook -# BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) prefix=@PREFIX@ exec_prefix=@EXEC_PREFIX@ @@ -9,7 +9,7 @@ libdir=@LIBDIR@ Name: zstd Description: fast lossless compression algorithm library -URL: http://www.zstd.net/ +URL: https://facebook.github.io/zstd/ Version: @VERSION@ Libs: -L${libdir} -lzstd Libs.private: @LIBS_PRIVATE@ diff --git a/lib/module.modulemap b/lib/module.modulemap index bbb939782e1..eff98dfacea 100644 --- a/lib/module.modulemap +++ b/lib/module.modulemap @@ -1,17 +1,27 @@ module libzstd [extern_c] { header "zstd.h" export * - config_macros [exhaustive] /* zstd.h */ \ + config_macros [exhaustive] \ + /* zstd.h */ \ ZSTD_STATIC_LINKING_ONLY, \ + ZSTDLIB_VISIBILITY, \ ZSTDLIB_VISIBLE, \ + ZSTDLIB_HIDDEN, \ ZSTD_DLL_EXPORT, \ ZSTDLIB_STATIC_API, \ ZSTD_DISABLE_DEPRECATE_WARNINGS, \ ZSTD_CLEVEL_DEFAULT, \ - /* zdict.h */ ZDICT_STATIC_LINKING_ONLY, \ + /* zdict.h */ \ + ZDICT_STATIC_LINKING_ONLY, \ + ZDICTLIB_VISIBLE, \ + ZDICTLIB_HIDDEN, \ ZDICTLIB_VISIBILITY, \ + ZDICTLIB_STATIC_API, \ ZDICT_DISABLE_DEPRECATE_WARNINGS, \ - /* zstd_errors.h */ ZSTDERRORLIB_VISIBILITY + /* zstd_errors.h */ \ + ZSTDERRORLIB_VISIBLE, \ + ZSTDERRORLIB_HIDDEN, \ + ZSTDERRORLIB_VISIBILITY module dictbuilder [extern_c] { header "zdict.h" diff --git a/lib/zdict.h b/lib/zdict.h index f1e139a40dd..2268f948a5d 100644 --- a/lib/zdict.h +++ b/lib/zdict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,32 +8,43 @@ * You may select, at your option, one of the above-listed licenses. */ -#ifndef DICTBUILDER_H_001 -#define DICTBUILDER_H_001 - #if defined (__cplusplus) extern "C" { #endif +#ifndef ZSTD_ZDICT_H +#define ZSTD_ZDICT_H /*====== Dependencies ======*/ #include <stddef.h> /* size_t */ /* ===== ZDICTLIB_API : control library symbols visibility ===== */ -#ifndef ZDICTLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZDICTLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZDICTLIB_VISIBILITY +# define ZDICTLIB_VISIBLE ZDICTLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_VISIBLE __attribute__ ((visibility ("default"))) +# else +# define ZDICTLIB_VISIBLE +# endif +#endif + +#ifndef ZDICTLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else -# define ZDICTLIB_VISIBILITY +# define ZDICTLIB_HIDDEN # endif #endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY +# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZDICTLIB_API ZDICTLIB_VISIBILITY +# define ZDICTLIB_API ZDICTLIB_VISIBLE #endif /******************************************************************************* @@ -110,8 +121,8 @@ extern "C" { * The zstd CLI defaults to a 110KB 
dictionary. You likely don't need a * dictionary larger than that. But, most use cases can get away with a * smaller dictionary. The advanced dictionary builders can automatically - * shrink the dictionary for you, and select a the smallest size that - * doesn't hurt compression ratio too much. See the `shrinkDict` parameter. + * shrink the dictionary for you, and select the smallest size that doesn't + * hurt compression ratio too much. See the `shrinkDict` parameter. * A smaller dictionary can save memory, and potentially speed up * compression. * @@ -201,9 +212,9 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCap const size_t* samplesSizes, unsigned nbSamples); typedef struct { - int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ - unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) + int compressionLevel; /**< optimize for a specific zstd compression level; 0 means default */ + unsigned notificationLevel; /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + unsigned dictID; /**< force dictID value; 0 means auto mode (32-bits random value) * NOTE: The zstd format reserves some dictionary IDs for future use. * You may use them in private settings, but be warned that they * may be used by zstd in a public dictionary registry in the future. @@ -260,9 +271,21 @@ ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictS ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); +#endif /* ZSTD_ZDICT_H */ +#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC) +#define ZSTD_ZDICT_H_STATIC -#ifdef ZDICT_STATIC_LINKING_ONLY +/* This can be overridden externally to hide static symbols. */ +#ifndef ZDICTLIB_STATIC_API +# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllexport) ZDICTLIB_VISIBLE +# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllimport) ZDICTLIB_VISIBLE +# else +# define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE +# endif +#endif /* ==================================================================================== * The definitions in this section are considered experimental. @@ -318,7 +341,7 @@ typedef struct { * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); @@ -340,7 +363,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. 
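As a usage illustration only (not part of this patch), the basic trainer documented above can be driven roughly as follows; the helper name and buffers are invented for the example, and error handling is reduced to a single message:

    #include <stdio.h>
    #include <zdict.h>

    /* samplesBuffer holds all training samples back to back;
     * samplesSizes[i] gives the size of sample i. */
    static size_t train_dictionary(void* dictBuffer, size_t dictCapacity,
                                   const void* samplesBuffer,
                                   const size_t* samplesSizes, unsigned nbSamples)
    {
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                                      samplesBuffer, samplesSizes, nbSamples);
        if (ZDICT_isError(dictSize)) {
            fprintf(stderr, "dictionary training failed: %s\n", ZDICT_getErrorName(dictSize));
            return 0;
        }
        return dictSize;   /* number of bytes written into dictBuffer */
    }

The cover/fastCover variants below take the same samples layout, plus their parameter structs.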
*/ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters); @@ -361,7 +384,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); @@ -384,7 +407,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters); @@ -409,7 +432,7 @@ typedef struct { * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); @@ -421,32 +444,31 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( or _CRT_SECURE_NO_WARNINGS in Visual. 
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS -# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +# define ZDICT_DEPRECATED(message) /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API +# define ZDICT_DEPRECATED(message) [[deprecated(message)]] # elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated)) # elif defined(_MSC_VER) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# define ZDICT_DEPRECATED(message) __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") -# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# define ZDICT_DEPRECATED(message) # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") +ZDICTLIB_STATIC_API size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); -#endif /* ZDICT_STATIC_LINKING_ONLY */ +#endif /* ZSTD_ZDICT_H_STATIC */ #if defined (__cplusplus) } #endif - -#endif /* DICTBUILDER_H_001 */ diff --git a/lib/zstd.h b/lib/zstd.h index a88ae7bf8ed..95aac07370d 100644 --- a/lib/zstd.h +++ b/lib/zstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,21 +14,31 @@ extern "C" { #ifndef ZSTD_H_235446 #define ZSTD_H_235446 -/* ====== Dependency ======*/ +/* ====== Dependencies ======*/ #include <limits.h> /* INT_MAX */ #include <stddef.h> /* size_t */ /* ===== ZSTDLIB_API : control library symbols visibility ===== */ #ifndef ZSTDLIB_VISIBLE -# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) + /* Backwards compatibility with old macro name */ +# ifdef ZSTDLIB_VISIBILITY +# define ZSTDLIB_VISIBLE ZSTDLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) # define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) -# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else # define ZSTDLIB_VISIBLE +# endif +#endif + +#ifndef ZSTDLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else # define ZSTDLIB_HIDDEN # endif #endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) @@ -37,6 +47,28 @@ extern "C" { # define ZSTDLIB_API ZSTDLIB_VISIBLE #endif +/* Deprecation warnings : + * Should these warnings be a problem, it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. + * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. 
+ */ +#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS +# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZSTD_DEPRECATED(message) [[deprecated(message)]] +# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ >= 3) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZSTD_DEPRECATED(message) __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") +# define ZSTD_DEPRECATED(message) +# endif +#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ + /******************************************************************************* Introduction @@ -74,7 +106,7 @@ extern "C" { /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 #define ZSTD_VERSION_MINOR 5 -#define ZSTD_VERSION_RELEASE 2 +#define ZSTD_VERSION_RELEASE 4 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /*! ZSTD_versionNumber() : @@ -165,7 +197,9 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t * "empty", "unknown" and "error" results to the same return value (0), * while ZSTD_getFrameContentSize() gives them separate return values. * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ -ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); +ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize") +ZSTDLIB_API +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+ * `src` should point to the start of a ZSTD frame or skippable frame. @@ -177,8 +211,30 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize) /*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ +/* ZSTD_compressBound() : + * maximum compressed size in worst case single-pass scenario. + * When invoking `ZSTD_compress()` or any other one-pass compression function, + * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) + * as it eliminates one potential failure scenario, + * aka not enough room in dst buffer to write the compressed frame. + * Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE . + * In which case, ZSTD_compressBound() will return an error code + * which can be tested using ZSTD_isError(). + * + * ZSTD_COMPRESSBOUND() : + * same as ZSTD_compressBound(), but as a macro. + * It can be used to produce constants, which can be useful for static allocation, + * for example to size a static array on stack. + * Will produce constant value 0 if srcSize too large. + */ +#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U) +#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? 
(((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ +/* ZSTD_isError() : + * Most ZSTD_* functions returning a size_t value can be tested for error, + * using ZSTD_isError(). + * @return 1 if error, 0 otherwise + */ ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ @@ -421,6 +477,9 @@ typedef enum { * ZSTD_c_validateSequences * ZSTD_c_useBlockSplitter * ZSTD_c_useRowMatchFinder + * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback + * ZSTD_c_maxBlockSize * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; * also, the enums values themselves are unstable and can still change. @@ -439,7 +498,11 @@ typedef enum { ZSTD_c_experimentalParam12=1009, ZSTD_c_experimentalParam13=1010, ZSTD_c_experimentalParam14=1011, - ZSTD_c_experimentalParam15=1012 + ZSTD_c_experimentalParam15=1012, + ZSTD_c_experimentalParam16=1013, + ZSTD_c_experimentalParam17=1014, + ZSTD_c_experimentalParam18=1015, + ZSTD_c_experimentalParam19=1016 } ZSTD_cParameter; typedef struct { @@ -502,7 +565,7 @@ typedef enum { * They will be used to compress next frame. * Resetting session never fails. * - The parameters : changes all parameters back to "default". - * This removes any reference to any dictionary too. + * This also removes any reference to any dictionary or external sequence producer. * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) * - Both : similar to resetting the session, followed by resetting parameters. @@ -552,13 +615,15 @@ typedef enum { * ZSTD_d_stableOutBuffer * ZSTD_d_forceIgnoreChecksum * ZSTD_d_refMultipleDDicts + * ZSTD_d_disableHuffmanAssembly * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly */ ZSTD_d_experimentalParam1=1000, ZSTD_d_experimentalParam2=1001, ZSTD_d_experimentalParam3=1002, - ZSTD_d_experimentalParam4=1003 + ZSTD_d_experimentalParam4=1003, + ZSTD_d_experimentalParam5=1004 } ZSTD_dParameter; @@ -737,8 +802,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output * This following is a legacy streaming API, available since v1.0+ . * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). * It is redundant, but remains fully supported. - * Streaming in combination with advanced parameters and dictionary compression - * can only be used through the new API. ******************************************************************************/ /*! 
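For illustration (not part of the patch), a one-pass caller typically sizes its destination with ZSTD_compressBound() as documented above; the helper below is a sketch with an invented name and simplified error handling:

    #include <stdlib.h>
    #include <zstd.h>

    /* Compress src in a single pass into a freshly allocated buffer.
     * On success, *dstPtr owns the buffer and the return value is the compressed size;
     * returns 0 on any failure (simplified for the example). */
    static size_t compress_once(void** dstPtr, const void* src, size_t srcSize, int level)
    {
        size_t const bound = ZSTD_compressBound(srcSize);   /* may itself be an error code for huge inputs */
        if (ZSTD_isError(bound)) return 0;
        void* const dst = malloc(bound);
        if (dst == NULL) return 0;
        {   size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, level);
            if (ZSTD_isError(cSize)) { free(dst); return 0; }
            *dstPtr = dst;
            return cSize;
        }
    }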
@@ -747,6 +810,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * + * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + * to compress with a dictionary. */ ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); /*! @@ -797,13 +863,31 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer /*===== Streaming decompression functions =====*/ -/* This function is redundant with the advanced API and equivalent to: +/*! ZSTD_initDStream() : + * Initialize/reset DStream state for new decompression operation. + * Call before new decompression operation using same DStream. * + * Note : This function is redundant with the advanced API and equivalent to: * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_refDDict(zds, NULL); */ ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); +/*! ZSTD_decompressStream() : + * Streaming decompression function. + * Call repetitively to consume full input updating it as necessary. + * Function will update both input and output `pos` fields exposing current state via these fields: + * - `input.pos < input.size`, some input remaining and caller should provide remaining input + * on the next call. + * - `output.pos < output.size`, decoder finished and flushed all remaining buffers. + * - `output.pos == output.size`, potentially uncflushed data present in the internal buffers, + * call ZSTD_decompressStream() again to flush remaining data to output. + * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + * + * @return : 0 when a frame is completely decoded and fully flushed, + * or an error code, which can be tested using ZSTD_isError(), + * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + */ ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ @@ -922,7 +1006,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); * If @return == 0, the dictID could not be decoded. * This could for one of the following reasons : * - The frame does not require a dictionary to be decoded (most common case). - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). * - This is not a Zstandard frame. @@ -946,8 +1030,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). 
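To make the streaming contract documented above concrete, here is a minimal file-to-file decompression loop (an illustrative sketch, not taken from the patch), using the recommended ZSTD_DStreamInSize()/ZSTD_DStreamOutSize() buffer sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Decompress fin into fout with the streaming API. Returns 0 on success, 1 on error. */
    static int stream_decompress(FILE* fin, FILE* fout)
    {
        ZSTD_DStream* const zds = ZSTD_createDStream();
        size_t const inCap  = ZSTD_DStreamInSize();
        size_t const outCap = ZSTD_DStreamOutSize();
        char* const inBuf  = malloc(inCap);
        char* const outBuf = malloc(outCap);
        size_t readSize;
        int ret = 1;
        if (zds == NULL || inBuf == NULL || outBuf == NULL) goto cleanup;
        ZSTD_initDStream(zds);                 /* reset state for a fresh frame */
        while ((readSize = fread(inBuf, 1, inCap, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {   /* keep calling until this chunk is fully consumed */
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const r = ZSTD_decompressStream(zds, &output, &input);
                if (ZSTD_isError(r)) goto cleanup;
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        ret = 0;
    cleanup:
        free(inBuf); free(outBuf);
        ZSTD_freeDStream(zds);                 /* accepts NULL */
        return ret;
    }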
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + * until parameters are reset, a new dictionary is loaded, or the dictionary + * is explicitly invalidated by loading a NULL dictionary. * Note 2 : Loading a dictionary involves building tables. * It's also a CPU consuming operation, with non-negligible impact on latency. * Tables are dependent on compression parameters, and for this reason, @@ -960,7 +1045,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used for all next compressed frames. + * Reference a prepared dictionary, to be used for all future compressed frames. * Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. @@ -995,9 +1080,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal DDict from dict buffer, - * to be used to decompress next frames. - * The dictionary remains valid for all future frames, until explicitly invalidated. + * Create an internal DDict from dict buffer, to be used to decompress all future frames. + * The dictionary remains valid for all future frames, until explicitly invalidated, or + * a new dictionary is loaded. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, * meaning "return to no-dictionary mode". @@ -1021,9 +1106,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s * The memory for the table is allocated on the first call to refDDict, and can be * freed with ZSTD_freeDCtx(). * + * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + * will be managed, and referencing a dictionary effectively "discards" any previous one. + * * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. * Special: referencing a NULL DDict means "return to no-dictionary mode". * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */ @@ -1086,28 +1172,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); # endif #endif -/* Deprecation warnings : - * Should these warnings be a problem, it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. - * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. 
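A short sketch of the sticky-dictionary behaviour described above (illustrative only; the helper name, level, and buffers are invented): the dictionary is loaded once and then applies to every subsequent frame produced with this CCtx until it is reset or replaced.

    #include <zstd.h>

    /* Load a dictionary once; it then sticks to the CCtx for all future frames. */
    static size_t compress_with_dict(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     const void* dict, size_t dictSize)
    {
        size_t r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);   /* sticky across frames */
        if (ZSTD_isError(r)) return r;
        r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        if (ZSTD_isError(r)) return r;
        return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    }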
- */ -#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_STATIC_API -# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ >= 3) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated)) -# elif defined(_MSC_VER) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __declspec(deprecated(message)) -# else -# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API -# endif -#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ - /**************************************************************************************** * experimental API (static linking only) **************************************************************************************** @@ -1142,6 +1206,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ #define ZSTD_STRATEGY_MIN ZSTD_fast #define ZSTD_STRATEGY_MAX ZSTD_btultra2 +#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */ #define ZSTD_OVERLAPLOG_MIN 0 @@ -1369,33 +1434,89 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size * or an error code (if srcSize is too small) */ ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); +/*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. + * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + * and the input buffer must be at the end of the output buffer. + * + * _______________________ Output Buffer ________________________ + * | | + * | ____ Input Buffer ____| + * | | | + * v v v + * |---------------------------------------|-----------|----------| + * ^ ^ ^ + * |___________________ Output_Size ___________________|_ Margin _| + * + * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + * ZSTD_decompressDCtx(). + * NOTE: This function supports multi-frame input. + * + * @param src The compressed frame(s) + * @param srcSize The size of the compressed frame(s) + * @returns The decompression margin or an error that can be checked with ZSTD_isError(). + */ +ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize); + +/*! ZSTD_DECOMPRESS_MARGIN() : + * Similar to ZSTD_decompressionMargin(), but instead of computing the margin from + * the compressed frame, compute it from the original size and the blockSizeLog. + * See ZSTD_decompressionMargin() for details. + * + * WARNING: This macro does not support multi-frame input, the input must be a single + * zstd frame. If you need that support use the function, or implement it yourself. + * + * @param originalSize The original uncompressed size of the data. + * @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). 
+ * Unless you explicitly set the windowLog smaller than + * ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX. + */ +#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \ + ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \ + 4 /* checksum */ + \ + ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \ + (blockSize) /* One block of margin */ \ + )) + typedef enum { ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */ } ZSTD_sequenceFormat_e; +/*! ZSTD_sequenceBound() : + * `srcSize` : size of the input buffer + * @return : upper-bound for the number of sequences that can be generated + * from a buffer of srcSize bytes + * + * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + */ +ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize); + /*! ZSTD_generateSequences() : - * Generate sequences using ZSTD_compress2, given a source buffer. + * Generate sequences using ZSTD_compress2(), given a source buffer. * * Each block will end with a dummy sequence * with offset == 0, matchLength == 0, and litLength == length of last literals. * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) * simply acts as a block delimiter. * - * zc can be used to insert custom compression params. - * This function invokes ZSTD_compress2 + * @zc can be used to insert custom compression params. + * This function invokes ZSTD_compress2(). * * The output of this function can be fed into ZSTD_compressSequences() with CCtx * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters * @return : number of sequences generated */ -ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, - size_t outSeqsSize, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t +ZSTD_generateSequences( ZSTD_CCtx* zc, + ZSTD_Sequence* outSeqs, size_t outSeqsSize, + const void* src, size_t srcSize); /*! ZSTD_mergeBlockDelimiters() : * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - * by merging them into into the literals of the next sequence. + * by merging them into the literals of the next sequence. * * As such, the final generated result has no explicit representation of block boundaries, * and the final last literals segment is not represented in the sequences. @@ -1407,7 +1528,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); /*! ZSTD_compressSequences() : - * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. + * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + * @src contains the entire input (not just the literals). + * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) * The entire source is compressed into a single frame. * @@ -1432,17 +1555,18 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. 
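Following the buffer diagram above, an in-place caller allocates a single buffer of (Output_Size + margin) bytes, copies the compressed input to its tail, and decompresses into its front. A rough sketch (illustrative; assumes the original size is known, and uses the new static-only ZSTD_decompressionMargin()):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressionMargin() is a static-linking-only API */
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    /* In-place decompression: input is parked at the end of the output buffer.
     * Returns the decompressed size; on failure returns 0 or a zstd error code (simplified handling). */
    static size_t decompress_in_place(void** result, const void* cSrc, size_t cSize, size_t originalSize)
    {
        size_t const margin = ZSTD_decompressionMargin(cSrc, cSize);
        size_t dSize = 0;
        if (ZSTD_isError(margin)) return margin;
        {   size_t const bufSize = originalSize + margin;
            char* const buf = malloc(bufSize);
            ZSTD_DCtx* const dctx = ZSTD_createDCtx();
            if (buf != NULL && dctx != NULL) {
                memcpy(buf + bufSize - cSize, cSrc, cSize);       /* input at the buffer's tail */
                dSize = ZSTD_decompressDCtx(dctx, buf, bufSize,   /* output written from the front */
                                            buf + bufSize - cSize, cSize);
            }
            ZSTD_freeDCtx(dctx);   /* accepts NULL */
            if (buf == NULL || dctx == NULL || ZSTD_isError(dSize)) { free(buf); return dSize; }
            *result = buf;         /* caller frees; first dSize bytes are the decompressed data */
            return dSize;
        }
    }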
* Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, * and cannot emit an RLE block that disagrees with the repcode history - * @return : final compressed size or a ZSTD error. + * @return : final compressed size, or a ZSTD error code. */ -ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t +ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize); /*! ZSTD_writeSkippableFrame() : * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. * - * Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number, + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. @@ -1500,8 +1624,11 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. * - * Note 2 : only single-threaded compression is supported. + * Note : only single-threaded compression is supported. * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * + * Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. */ ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); @@ -1520,7 +1647,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void); * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), * an internal ?Dict will be created, which additional size is not estimated here. - * In this case, get total size by adding ZSTD_estimate?DictSize */ + * In this case, get total size by adding ZSTD_estimate?DictSize + * Note 2 : only single-threaded compression is supported. + * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. 
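The sequence-level round trip described above (ZSTD_generateSequences() feeding ZSTD_compressSequences() with explicit block delimiters) might look roughly like this; an illustrative sketch only, with an invented helper name and simplified cleanup:

    #define ZSTD_STATIC_LINKING_ONLY   /* sequence APIs are experimental / static-only */
    #include <stdlib.h>
    #include <zstd.h>

    /* Generate sequences for src, then re-compress them with ZSTD_compressSequences(). */
    static size_t compress_via_sequences(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    {
        size_t const maxSeqs = ZSTD_sequenceBound(srcSize);            /* upper bound on sequence count */
        ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize = 0;
        if (seqs != NULL && cctx != NULL) {
            size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
            if (!ZSTD_isError(nbSeqs)) {
                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
                /* generated sequences carry explicit block delimiters; tell the compressor so */
                ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
                cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
            }
        }
        free(seqs);
        ZSTD_freeCCtx(cctx);   /* accepts NULL */
        return cSize;          /* compressed size, 0 on allocation failure, or an error code */
    }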
+ */ ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); @@ -1670,22 +1802,31 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); * This function never fails (wide contract) */ ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); +/*! ZSTD_CCtx_setCParams() : + * Set all parameters provided within @cparams into the working @cctx. + * Note : if modifying parameters during compression (MT mode only), + * note that changes to the .windowLog parameter will be ignored. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams); + /*! ZSTD_compress_advanced() : * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_compress2") +ZSTDLIB_STATIC_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params); + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params); /*! ZSTD_compress_usingCDict_advanced() : * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary") +ZSTDLIB_STATIC_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, @@ -1829,13 +1970,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * - * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same - * between calls, except for the modifications that zstd makes to pos (the - * caller must not modify pos). This is checked by the compressor, and - * compression will fail if it ever changes. This means the only flush - * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end - * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos) - * MUST not be modified during compression or you will get data corruption. + * Tells the compressor that input data presented with ZSTD_inBuffer + * will ALWAYS be the same between calls. + * Technically, the @src pointer must never be changed, + * and the @pos field can only be updated by zstd. + * However, it's possible to increase the @size field, + * allowing scenarios where more data can be appended after compressions starts. + * These conditions are checked by the compressor, + * and compression will fail if they are not respected. + * Also, data in the ZSTD_inBuffer within the range [src, src + pos) + * MUST not be modified during compression or it will result in data corruption. 
* * When this flag is enabled zstd won't allocate an input window buffer, * because the user guarantees it can reference the ZSTD_inBuffer until @@ -1843,18 +1987,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also * avoid the memcpy() from the input buffer to the input window buffer. * - * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used. - * That means this flag cannot be used with ZSTD_compressStream(). - * * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using * this flag is ALWAYS memory safe, and will never access out-of-bounds - * memory. However, compression WILL fail if you violate the preconditions. + * memory. However, compression WILL fail if conditions are not respected. * - * WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST - * not be modified during compression or you will get data corruption. This - * is because zstd needs to reference data in the ZSTD_inBuffer to find + * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST + * not be modified during compression or it will result in data corruption. + * This is because zstd needs to reference data in the ZSTD_inBuffer to find * matches. Normally zstd maintains its own window buffer for this purpose, - * but passing this flag tells zstd to use the user provided buffer. + * but passing this flag tells zstd to rely on user provided buffer instead. */ #define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9 @@ -1899,7 +2040,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * Without validation, providing a sequence that does not conform to the zstd spec will cause * undefined behavior, and may produce a corrupted block. * - * With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for + * With validation enabled, if sequence is invalid (see doc/zstd_compression_format.md for * specifics regarding offset/matchlength requirements) then the function will bail out and * return an error. * @@ -1949,6 +2090,79 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo */ #define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 +/* ZSTD_c_prefetchCDictTables + * Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto. + * + * In some situations, zstd uses CDict tables in-place rather than copying them + * into the working context. (See docs on ZSTD_dictAttachPref_e above for details). + * In such situations, compression speed is seriously impacted when CDict tables are + * "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables + * when they are used in-place. + * + * For sufficiently small inputs, the cost of the prefetch will outweigh the benefit. + * For sufficiently large inputs, zstd will by default memcpy() CDict tables + * into the working context, so there is no need to prefetch. This parameter is + * targeted at a middle range of input sizes, where a prefetch is cheap enough to be + * useful but memcpy() is too expensive. The exact range of input sizes where this + * makes sense is best determined by careful experimentation. + * + * Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable, + * but in the future zstd may conditionally enable this feature via an auto-detection + * heuristic for cold CDicts. + * Use ZSTD_ps_disable to opt out of prefetching under any circumstances. 
+ */ +#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16 + +/* ZSTD_c_enableSeqProducerFallback + * Allowed values are 0 (disable) and 1 (enable). The default setting is 0. + * + * Controls whether zstd will fall back to an internal sequence producer if an + * external sequence producer is registered and returns an error code. This fallback + * is block-by-block: the internal sequence producer will only be called for blocks + * where the external sequence producer returns an error code. Fallback parsing will + * follow any other cParam settings, such as compression level, the same as in a + * normal (fully-internal) compression operation. + * + * The user is strongly encouraged to read the full Block-Level Sequence Producer API + * documentation (below) before setting this parameter. */ +#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17 + +/* ZSTD_c_maxBlockSize + * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB). + * The default is ZSTD_BLOCKSIZE_MAX, and setting it to 0 selects the default. + * + * This parameter can be used to set an upper bound on the blocksize + * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper + * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make + * compressBound() inaccurate). Currently only meant to be used for testing. + * + */ +#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18 + +/* ZSTD_c_searchForExternalRepcodes + * This parameter affects how zstd parses external sequences, such as sequences + * provided through the compressSequences() API or from an external block-level + * sequence producer. + * + * If set to ZSTD_ps_enable, the library will check for repeated offsets in + * external sequences, even if those repcodes are not explicitly indicated in + * the "rep" field. Note that this is the only way to exploit repcode matches + * while using compressSequences() or an external sequence producer, since zstd + * currently ignores the "rep" field of external sequences. + * + * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in + * external sequences, regardless of whether the "rep" field has been set. This + * reduces sequence compression overhead by about 25% while sacrificing some + * compression ratio. + * + * The default value is ZSTD_ps_auto, for which the library will enable/disable + * based on compression level. + * + * Note: for now, this param only has an effect if ZSTD_c_blockDelimiters is + * set to ZSTD_sf_explicitBlockDelimiters. That may change in the future. + */ +#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 + /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. @@ -2105,7 +2319,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete * in the range [dst, dst + pos) MUST not be modified during decompression * or you will get data corruption. * - * When this flags is enabled zstd won't allocate an output buffer, because + * When this flag is enabled zstd won't allocate an output buffer, because * it can write directly to the ZSTD_outBuffer, but it will still allocate * an input buffer large enough to fit any compressed block. This will also * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
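A short, hedged sketch of how these new experimental parameters could be set and read back through the usual advanced-API calls (the values are arbitrary examples, not recommendations, and return codes are left unchecked for brevity):

    /* Create a CCtx with a few of the new experimental parameters applied. */
    static ZSTD_CCtx* createTunedCCtx(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        int maxBlock = 0;
        /* force CDict-table prefetching instead of relying on the auto heuristic */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_prefetchCDictTables, ZSTD_ps_enable);
        /* allow internal fallback if a registered external sequence producer errors out */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
        /* cap block size at 32 KB (testing only, per the note above) */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 32 * 1024);
        /* let repcode search in external sequences follow the compression level */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchForExternalRepcodes, ZSTD_ps_auto);
        /* any of them can be read back with ZSTD_CCtx_getParameter() */
        ZSTD_CCtx_getParameter(cctx, ZSTD_c_maxBlockSize, &maxBlock);   /* maxBlock == 32768 */
        (void)maxBlock;
        return cctx;
    }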
@@ -2158,6 +2372,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete */ #define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 +/* ZSTD_d_disableHuffmanAssembly + * Set to 1 to disable the Huffman assembly implementation. + * The default value is 0, which allows zstd to use the Huffman assembly + * implementation if available. + * + * This parameter can be used to disable Huffman assembly at runtime. + * If you want to disable it at compile time you can define the macro + * ZSTD_DISABLE_ASM. + */ +#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5 + /*! ZSTD_DCtx_setFormat() : * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). @@ -2166,6 +2391,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete * such ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead") +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : @@ -2202,6 +2428,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); @@ -2219,6 +2446,7 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); @@ -2239,6 +2467,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, @@ -2253,6 +2482,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /*! ZSTD_initCStream_usingCDict_advanced() : @@ -2271,6 +2501,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, @@ -2295,6 +2526,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. 
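As a worked illustration of the deprecation notes above, the documented replacement for one of the legacy initializers, ZSTD_initCStream_usingDict(), expressed with the reset-and-set idiom (ZSTD_CStream and ZSTD_CCtx are the same object, so the CCtx setters apply directly; error checks on the setters are omitted, and the wrapper name is a placeholder):

    static void startStreamWithDict(ZSTD_CStream* zcs,
                                    const void* dict, size_t dictSize, int level)
    {
        /* formerly : ZSTD_initCStream_usingDict(zcs, dict, dictSize, level); */
        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
        ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
    }

The other deprecated initializers follow the same pattern, substituting ZSTD_CCtx_refCDict() or ZSTD_CCtx_setPledgedSrcSize() as indicated by their respective deprecation messages.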
*/ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); @@ -2340,8 +2572,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); * * note: no dictionary will be used if dict == NULL or dictSize < 8 - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! @@ -2351,8 +2583,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo * ZSTD_DCtx_refDDict(zds, ddict); * * note : ddict is referenced, it must outlive decompression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! @@ -2361,8 +2593,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * * re-use decompression parameters from previous init; saves dictionary loading - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); @@ -2383,7 +2615,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : @@ -2408,15 +2639,20 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ -ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ + +ZSTD_DEPRECATED("This function will likely be removed in a future release. 
It is misleading and has very limited utility.") +ZSTDLIB_STATIC_API +size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */ ZSTD_DEPRECATED("use advanced API to access custom parameters") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTD_DEPRECATED("use advanced API to access custom parameters") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ /** Buffer-less streaming decompression (synchronous mode) @@ -2429,8 +2665,8 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. Data fragment must be large enough to ensure successful decoding. `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. - @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. - >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. + result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. + >0 : `srcSize` is too small, please provide at least result bytes on next attempt. errorCode, which can be tested using ZSTD_isError(). It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, @@ -2449,7 +2685,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ The most memory efficient way is to use a round buffer of sufficient size. Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), - which can @return an error code if required value is too large for current system (in 32-bits mode). + which can return an error code if required value is too large for current system (in 32-bits mode). In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. @@ -2469,7 +2705,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). 
+ result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). @@ -2501,6 +2737,8 @@ typedef struct { unsigned headerSize; unsigned dictID; unsigned checksumFlag; + unsigned _reserved1; + unsigned _reserved2; } ZSTD_frameHeader; /*! ZSTD_getFrameHeader() : @@ -2523,6 +2761,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ +ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.") ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); @@ -2545,7 +2784,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); - It is necessary to init context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary - + copyCCtx() and copyDCtx() can be used too - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + For inputs larger than a single block, consider using regular ZSTD_compress() instead. @@ -2568,6 +2806,167 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_ ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ +/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API ********************* + * + * *** OVERVIEW *** + * The Block-Level Sequence Producer API allows users to provide their own custom + * sequence producer which libzstd invokes to process each block. The produced list + * of sequences (literals and matches) is then post-processed by libzstd to produce + * valid compressed blocks. + * + * This block-level offload API is a more granular complement of the existing + * frame-level offload API compressSequences() (introduced in v1.5.1). It offers + * an easier migration story for applications already integrated with libzstd: the + * user application continues to invoke the same compression functions + * ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits + * from the specific advantages of the external sequence producer. For example, + * the sequence producer could be tuned to take advantage of known characteristics + * of the input, to offer better speed / ratio, or could leverage hardware + * acceleration not available within libzstd itself. + * + * See contrib/externalSequenceProducer for an example program employing the + * Block-Level Sequence Producer API. + * + * *** USAGE *** + * The user is responsible for implementing a function of type + * ZSTD_sequenceProducer_F. 
For each block, zstd will pass the following + * arguments to the user-provided function: + * + * - sequenceProducerState: a pointer to a user-managed state for the sequence + * producer. + * + * - outSeqs, outSeqsCapacity: an output buffer for the sequence producer. + * outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory + * backing outSeqs is managed by the CCtx. + * + * - src, srcSize: an input buffer for the sequence producer to parse. + * srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX. + * + * - dict, dictSize: a history buffer, which may be empty, which the sequence + * producer may reference as it parses the src buffer. Currently, zstd will + * always pass dictSize == 0 into external sequence producers, but this will + * change in the future. + * + * - compressionLevel: a signed integer representing the zstd compression level + * set by the user for the current operation. The sequence producer may choose + * to use this information to change its compression strategy and speed/ratio + * tradeoff. Note: the compression level does not reflect zstd parameters set + * through the advanced API. + * + * - windowSize: a size_t representing the maximum allowed offset for external + * sequences. Note that sequence offsets are sometimes allowed to exceed the + * windowSize if a dictionary is present, see doc/zstd_compression_format.md + * for details. + * + * The user-provided function shall return a size_t representing the number of + * sequences written to outSeqs. This return value will be treated as an error + * code if it is greater than outSeqsCapacity. The return value must be non-zero + * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided + * for convenience, but any value greater than outSeqsCapacity will be treated as + * an error code. + * + * If the user-provided function does not return an error code, the sequences + * written to outSeqs must be a valid parse of the src buffer. Data corruption may + * occur if the parse is not valid. A parse is defined to be valid if the + * following conditions hold: + * - The sum of matchLengths and literalLengths must equal srcSize. + * - All sequences in the parse, except for the final sequence, must have + * matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have + * matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0. + * - All offsets must respect the windowSize parameter as specified in + * doc/zstd_compression_format.md. + * - If the final sequence has matchLength == 0, it must also have offset == 0. + * + * zstd will only validate these conditions (and fail compression if they do not + * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence + * validation has a performance cost. + * + * If the user-provided function returns an error, zstd will either fall back + * to an internal sequence producer or fail the compression operation. The user can + * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback + * cParam. Fallback compression will follow any other cParam settings, such as + * compression level, the same as in a normal compression operation. + * + * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F + * function by calling + * ZSTD_registerSequenceProducer(cctx, + * sequenceProducerState, + * sequenceProducer) + * This setting will persist until the next parameter reset of the CCtx. + * + * The sequenceProducerState must be initialized by the user before calling + * ZSTD_registerSequenceProducer(). 
The user is responsible for destroying the + * sequenceProducerState. + * + * *** LIMITATIONS *** + * This API is compatible with all zstd compression APIs which respect advanced parameters. + * However, there are three limitations: + * + * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported. + * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level + * external sequence producer. + * - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some + * cases (see its documentation for details). Users must explicitly set + * ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external + * sequence producer is registered. + * - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default + * whenever ZSTD_c_windowLog < 128MB, but that's subject to change. Users should + * check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence + * Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog). + * + * Second, history buffers are not currently supported. Concretely, zstd will always pass + * dictSize == 0 to the external sequence producer (for now). This has two implications: + * - Dictionaries are not currently supported. Compression will *not* fail if the user + * references a dictionary, but the dictionary won't have any effect. + * - Stream history is not currently supported. All advanced compression APIs, including + * streaming APIs, work with external sequence producers, but each block is treated as + * an independent chunk without history from previous blocks. + * + * Third, multi-threading within a single compression is not currently supported. In other words, + * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered. + * Multi-threading across compressions is fine: simply create one CCtx per thread. + * + * Long-term, we plan to overcome all three limitations. There is no technical blocker to + * overcoming them. It is purely a question of engineering effort. + */ + +#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1)) + +typedef size_t ZSTD_sequenceProducer_F ( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +/*! ZSTD_registerSequenceProducer() : + * Instruct zstd to use a block-level external sequence producer function. + * + * The sequenceProducerState must be initialized by the caller, and the caller is + * responsible for managing its lifetime. This parameter is sticky across + * compressions. It will remain set until the user explicitly resets compression + * parameters. + * + * Sequence producer registration is considered to be an "advanced parameter", + * part of the "advanced API". This means it will only have an effect on compression + * APIs which respect advanced parameters, such as compress2() and compressStream2(). + * Older compression APIs such as compressCCtx(), which predate the introduction of + * "advanced parameters", will ignore any external sequence producer setting. + * + * The sequence producer can be "cleared" by registering a NULL function pointer. This + * removes all limitations described above in the "LIMITATIONS" section of the API docs. + * + * The user is strongly encouraged to read the full API documentation (above) before + * calling this function. 
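For concreteness, a hedged sketch of the simplest possible conforming producer: it describes each block as a single run of literals, which satisfies the parse rules above (literal lengths sum to srcSize; the final sequence has matchLength == 0 and offset == 0). The function and wrapper names and the NULL state are illustrative choices, not part of the API:

    /* Trivial ZSTD_sequenceProducer_F : emit the whole block as literals. */
    static size_t emitLiteralsOnly(void* state,
                                   ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                                   const void* src, size_t srcSize,
                                   const void* dict, size_t dictSize,
                                   int compressionLevel, size_t windowSize)
    {
        (void)state; (void)src; (void)dict; (void)dictSize;
        (void)compressionLevel; (void)windowSize;
        if (srcSize == 0) return 0;
        if (outSeqsCapacity < 1) return ZSTD_SEQUENCE_PRODUCER_ERROR;
        outSeqs[0].offset      = 0;                  /* final sequence => offset must be 0 */
        outSeqs[0].litLength   = (unsigned)srcSize;  /* srcSize <= ZSTD_BLOCKSIZE_MAX, so it fits */
        outSeqs[0].matchLength = 0;                  /* final sequence may have matchLength == 0 */
        outSeqs[0].rep         = 0;                  /* "rep" is currently ignored by zstd */
        return 1;                                    /* number of sequences written */
    }

    /* Typical registration (sticky until the next parameter reset) : */
    static void useLiteralProducer(ZSTD_CCtx* cctx)
    {
        ZSTD_registerSequenceProducer(cctx, NULL /* no state needed here */, emitLiteralsOnly);
        /* optional : let zstd parse internally if the producer ever returns an error */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
    }

A real producer would of course emit matches as well; this shape mainly shows where the state, capacity check, and return value fit.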
*/ +ZSTDLIB_STATIC_API void +ZSTD_registerSequenceProducer( + ZSTD_CCtx* cctx, + void* sequenceProducerState, + ZSTD_sequenceProducer_F* sequenceProducer +); + #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ #if defined (__cplusplus) diff --git a/lib/zstd_errors.h b/lib/zstd_errors.h index fa3686b7724..dc75eeebad9 100644 --- a/lib/zstd_errors.h +++ b/lib/zstd_errors.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -20,19 +20,31 @@ extern "C" { /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDERRORLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZSTDERRORLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_VISIBLE ZSTDERRORLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_VISIBLE __attribute__ ((visibility ("default"))) # else -# define ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_VISIBLE # endif #endif + +#ifndef ZSTDERRORLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else +# define ZSTDERRORLIB_HIDDEN +# endif +#endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE #endif /*-********************************************* @@ -58,14 +70,17 @@ typedef enum { ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, + ZSTD_error_literals_headerWrong = 24, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_combination_unsupported = 41, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stabilityCondition_notRespected = 50, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, @@ -73,11 +88,15 @@ typedef enum { ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, + ZSTD_error_noForwardProgress_destFull = 80, + ZSTD_error_noForwardProgress_inputEmpty = 82, /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_sequenceProducer_failed = 106, + ZSTD_error_externalSequences_invalid = 107, ZSTD_error_maxCode = 120 /* 
never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; diff --git a/programs/Makefile b/programs/Makefile index f77e1b7f10f..fcff41dc1b3 100644 --- a/programs/Makefile +++ b/programs/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -129,10 +129,10 @@ SET_CACHE_DIRECTORY = \ .PHONY: all -all: zstd +all: zstd zstd-compress zstd-decompress zstd-small .PHONY: allVariants -allVariants: zstd zstd-compress zstd-decompress zstd-small zstd-frugal zstd-nolegacy zstd-dictBuilder +allVariants: all zstd-frugal zstd-nolegacy zstd-dictBuilder .PHONY: zstd # must always be run zstd : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP) $(LZ4CPP) @@ -180,11 +180,13 @@ zstd : $(BUILD_DIR)/zstd endif # BUILD_DIR +CLEAN += zstd .PHONY: zstd-release zstd-release: DEBUGFLAGS := -DBACKTRACE_ENABLE=0 zstd-release: DEBUGFLAGS_LD := zstd-release: zstd +CLEAN += zstd32 zstd32 : CPPFLAGS += $(THREAD_CPP) zstd32 : LDFLAGS += $(THREAD_LD) zstd32 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) @@ -194,7 +196,8 @@ endif zstd32 : $(ZSTDLIB_FULL_SRC) $(ZSTD_CLI_SRC) $(CC) -m32 $(FLAGS) $^ -o $@$(EXT) -## zstd-nolegacy: same scope as zstd, with just support of legacy formats removed +## zstd-nolegacy: same scope as zstd, with removed support of legacy formats +CLEAN += zstd-nolegacy zstd-nolegacy : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD) $(LZ4LD) $(DEBUGFLAGS_LD) zstd-nolegacy : CPPFLAGS += -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 zstd-nolegacy : $(ZSTDLIB_CORE_SRC) $(ZDICT_SRC) $(ZSTD_CLI_OBJ) @@ -228,34 +231,46 @@ zstd-dll : zstd ## zstd-pgo: zstd executable optimized with PGO. .PHONY: zstd-pgo +zstd-pgo : LLVM_PROFDATA?=llvm-profdata +zstd-pgo : PROF_GENERATE_FLAGS=-fprofile-generate $(if $(findstring gcc,$(CC)),-fprofile-dir=.) +zstd-pgo : PROF_USE_FLAGS=-fprofile-use $(if $(findstring gcc,$(CC)),-fprofile-dir=. -Werror=missing-profile -Wno-error=coverage-mismatch) zstd-pgo : - $(MAKE) clean - $(MAKE) zstd MOREFLAGS=-fprofile-generate + $(MAKE) clean HASH_DIR=$(HASH_DIR) + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_GENERATE_FLAGS)" ./zstd -b19i1 $(PROFILE_WITH) ./zstd -b16i1 $(PROFILE_WITH) ./zstd -b9i2 $(PROFILE_WITH) ./zstd -b $(PROFILE_WITH) ./zstd -b7i2 $(PROFILE_WITH) ./zstd -b5 $(PROFILE_WITH) - $(RM) zstd *.o - case $(CC) in *clang*) if ! [ -e default.profdata ]; then llvm-profdata merge -output=default.profdata default*.profraw; fi ;; esac - $(MAKE) zstd MOREFLAGS=-fprofile-use +ifndef BUILD_DIR + $(RM) zstd obj/$(HASH_DIR)/zstd obj/$(HASH_DIR)/*.o +else + $(RM) zstd $(BUILD_DIR)/zstd $(BUILD_DIR)/*.o +endif + case $(CC) in *clang*) if ! [ -e default.profdata ]; then $(LLVM_PROFDATA) merge -output=default.profdata default*.profraw; fi ;; esac + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_USE_FLAGS)" ## zstd-small: minimal target, supporting only zstd compression and decompression. no bench. no legacy. no other format. 
-zstd-small: CFLAGS = -Os -s -zstd-frugal zstd-small: $(ZSTDLIB_CORE_SRC) zstdcli.c util.c timefn.c fileio.c +CLEAN += zstd-small zstd-frugal +zstd-small: CFLAGS = -Os -Wl,-s +zstd-frugal zstd-small: $(ZSTDLIB_CORE_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-decompress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_DECOMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c +CLEAN += zstd-decompress +zstd-decompress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_DECOMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-compress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c +CLEAN += zstd-compress +zstd-compress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) ## zstd-dictBuilder: executable supporting dictionary creation and compression (only) -zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c dibio.c +CLEAN += zstd-dictBuilder +zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c dibio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODECOMPRESS -DZSTD_NOTRACE $^ -o $@$(EXT) +CLEAN += zstdmt zstdmt: zstd ln -sf zstd zstdmt @@ -264,7 +279,7 @@ generate_res: $(RES64_FILE) $(RES32_FILE) ifneq (,$(filter Windows%,$(OS))) RC ?= windres -# http://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable +# https://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable $(RES64_FILE): windres/zstd.rc $(RC) -o $@ -I ../lib -I windres -i $< -O coff -F pe-x86-64 $(RES32_FILE): windres/zstd.rc @@ -273,12 +288,8 @@ endif .PHONY: clean clean: - $(RM) core *.o tmp* result* *.gcda dictionary *.zst \ - zstd$(EXT) zstd32$(EXT) zstd-dll$(EXT) \ - zstd-compress$(EXT) zstd-decompress$(EXT) \ - zstd-small$(EXT) zstd-frugal$(EXT) zstd-nolegacy$(EXT) zstd4$(EXT) \ - zstd-dictBuilder$(EXT) \ - *.gcda default*.profraw default.profdata have_zlib$(EXT) + $(RM) $(CLEAN) core *.o tmp* result* dictionary *.zst \ + *.gcda default*.profraw default.profdata have_zlib $(RM) -r obj/* @echo Cleaning completed @@ -339,7 +350,7 @@ ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD D HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) EGREP_OPTIONS += --color=never endif EGREP = egrep $(EGREP_OPTIONS) diff --git a/programs/README.md b/programs/README.md index 5570f90c3b4..1b9f47cbba9 100644 --- a/programs/README.md +++ b/programs/README.md @@ -164,6 +164,7 @@ Advanced arguments : --filelist FILE : read list of files to operate upon from FILE --output-dir-flat DIR : processed files are stored into DIR --output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure +--[no-]asyncio : use asynchronous IO (default: enabled) --[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled). 
If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate). -- : All arguments after "--" are treated as files @@ -208,7 +209,7 @@ Benchmark arguments : -b# : benchmark file(s), using # compression level (default: 3) -e# : test all compression levels successively from -b# to -e# (default: 1) -i# : minimum evaluation time in seconds (default: 3s) - -B# : cut file into independent blocks of size # (default: no block) + -B# : cut file into independent chunks of size # (default: no chunking) -S : output one benchmark result per input file (default: consolidated result) --priority=rt : set process priority to real-time ``` @@ -275,7 +276,7 @@ compression speed (for lower levels) with minimal change in compression ratio. The below table illustrates this on the [Silesia compression corpus]. -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia | Method | Compression ratio | Compression speed | Decompression speed | |:-------|------------------:|------------------:|---------------------:| diff --git a/programs/benchfn.c b/programs/benchfn.c index 1aadbdd9136..8e6726f8dc6 100644 --- a/programs/benchfn.c +++ b/programs/benchfn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -229,9 +229,9 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont, cont->timeSpent_ns += (unsigned long long)loopDuration_ns; /* estimate nbLoops for next run to last approximately 1 second */ - if (loopDuration_ns > (runBudget_ns / 50)) { + if (loopDuration_ns > ((double)runBudget_ns / 50)) { double const fastestRun_ns = MIN(bestRunTime.nanoSecPerRun, newRunTime.nanoSecPerRun); - cont->nbLoops = (unsigned)(runBudget_ns / fastestRun_ns) + 1; + cont->nbLoops = (unsigned)((double)runBudget_ns / fastestRun_ns) + 1; } else { /* previous run was too short : blindly increase workload by x multiplier */ const unsigned multiplier = 10; @@ -239,7 +239,7 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont, cont->nbLoops *= multiplier; } - if(loopDuration_ns < runTimeMin_ns) { + if(loopDuration_ns < (double)runTimeMin_ns) { /* don't report results for which benchmark run time was too small : increased risks of rounding errors */ assert(completed == 0); continue; diff --git a/programs/benchfn.h b/programs/benchfn.h index 590f292eaa6..1bd93d13519 100644 --- a/programs/benchfn.h +++ b/programs/benchfn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -123,7 +123,7 @@ BMK_runTime_t BMK_extract_runTime(BMK_runOutcome_t outcome); /* when benchmark failed, it means one invocation of `benchFn` failed. * The failure was detected by `errorFn`, operating on return values of `benchFn`. * Returns the faulty return value. - * note : this function will abort() program execution if benchmark did not failed. + * note : this function will abort() program execution if benchmark did not fail. * always check if benchmark failed first ! 
*/ size_t BMK_extract_errorResult(BMK_runOutcome_t outcome); diff --git a/programs/benchzstd.c b/programs/benchzstd.c index fa2659efbbb..a76db5f375f 100644 --- a/programs/benchzstd.c +++ b/programs/benchzstd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,7 +13,7 @@ * Tuning parameters ****************************************/ #ifndef BMK_TIMETEST_DEFAULT_S /* default minimum time per test */ -#define BMK_TIMETEST_DEFAULT_S 3 +# define BMK_TIMETEST_DEFAULT_S 3 #endif @@ -327,26 +327,31 @@ BMK_benchMemAdvancedNoAlloc( /* init */ memset(&benchResult, 0, sizeof(benchResult)); if (strlen(displayName)>17) displayName += strlen(displayName) - 17; /* display last 17 characters */ - if (adv->mode == BMK_decodeOnly) { /* benchmark only decompression : source must be already compressed */ + if (adv->mode == BMK_decodeOnly) { + /* benchmark only decompression : source must be already compressed */ const char* srcPtr = (const char*)srcBuffer; U64 totalDSize64 = 0; U32 fileNb; for (fileNb=0; fileNb decodedSize) { /* size_t overflow */ + RETURN_ERROR(32, BMK_benchOutcome_t, "decompressed size is too large for local system"); + } *resultBufferPtr = malloc(decodedSize); if (!(*resultBufferPtr)) { - RETURN_ERROR(33, BMK_benchOutcome_t, "not enough memory"); - } - if (totalDSize64 > decodedSize) { /* size_t overflow */ - free(*resultBufferPtr); - RETURN_ERROR(32, BMK_benchOutcome_t, "original size is too large"); + RETURN_ERROR(33, BMK_benchOutcome_t, "allocation error: not enough memory"); } cSize = srcSize; srcSize = decodedSize; @@ -387,6 +392,10 @@ BMK_benchMemAdvancedNoAlloc( RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1); } + if (!UTIL_support_MT_measurements() && adv->nbWorkers > 1) { + OUTPUTLEVEL(2, "Warning : time measurements may be incorrect in multithreading mode... \n") + } + /* Bench */ { U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0); # define NB_MARKS 4 @@ -442,7 +451,7 @@ BMK_benchMemAdvancedNoAlloc( BMK_runOutcome_t const cOutcome = BMK_benchTimedFn( timeStateCompress, cbp); if (!BMK_isSuccessful_runOutcome(cOutcome)) { - return BMK_benchOutcome_error(); + RETURN_ERROR(30, BMK_benchOutcome_t, "compression error"); } { BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); @@ -470,7 +479,7 @@ BMK_benchMemAdvancedNoAlloc( BMK_runOutcome_t const dOutcome = BMK_benchTimedFn(timeStateDecompress, dbp); if(!BMK_isSuccessful_runOutcome(dOutcome)) { - return BMK_benchOutcome_error(); + RETURN_ERROR(30, BMK_benchOutcome_t, "decompression error"); } { BMK_runTime_t const dResult = BMK_extract_runTime(dOutcome); @@ -594,7 +603,7 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, void* resultBuffer = srcSize ? malloc(srcSize) : NULL; - int allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || + int const allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || !cSizes || !cCapacities || !resPtrs || !resSizes || !timeStateCompress || !timeStateDecompress || !cctx || !dctx || diff --git a/programs/benchzstd.h b/programs/benchzstd.h index 11ac85da7f9..aa683dfc259 100644 --- a/programs/benchzstd.h +++ b/programs/benchzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/datagen.c b/programs/datagen.c index 3b4f9e5c7b6..ddc690bb1b7 100644 --- a/programs/datagen.c +++ b/programs/datagen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/datagen.h b/programs/datagen.h index b76ae2a2225..ca72700063f 100644 --- a/programs/datagen.h +++ b/programs/datagen.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/dibio.c b/programs/dibio.c index d19f954486f..26ebe5ca1d6 100644 --- a/programs/dibio.c +++ b/programs/dibio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -27,10 +27,11 @@ #include <string.h> /* memset */ #include <stdio.h> /* fprintf, fopen, ftello64 */ #include <errno.h> /* errno */ -#include <assert.h> #include "timefn.h" /* UTIL_time_t, UTIL_clockSpanMicro, UTIL_getTime */ +#include "../lib/common/debug.h" /* assert */ #include "../lib/common/mem.h" /* read */ +#include "../lib/zstd_errors.h" #include "dibio.h" @@ -127,8 +128,11 @@ static int DiB_loadFiles( while ( nbSamplesLoaded < sstSize && fileIndex < nbFiles ) { size_t fileDataLoaded; S64 const fileSize = DiB_getFileSize(fileNamesTable[fileIndex]); - if (fileSize <= 0) /* skip if zero-size or file error */ + if (fileSize <= 0) { + /* skip if zero-size or file error */ + ++fileIndex; continue; + } f = fopen( fileNamesTable[fileIndex], "rb"); if (f == NULL) @@ -193,7 +197,8 @@ static U32 DiB_rand(U32* src) static void DiB_shuffle(const char** fileNamesTable, unsigned nbFiles) { U32 seed = 0xFD2FB528; unsigned i; - assert(nbFiles >= 1); + if (nbFiles == 0) + return; for (i = nbFiles - 1; i > 0; --i) { unsigned const j = DiB_rand(&seed) % (i + 1); const char* const tmp = fileNamesTable[j]; @@ -269,21 +274,20 @@ static fileStats DiB_fileStats(const char** fileNamesTable, int nbFiles, size_t int n; memset(&fs, 0, sizeof(fs)); - // We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX + /* We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX */ assert( chunkSize <= SAMPLESIZE_MAX ); for (n=0; n 0) - { - // TODO: is there a minimum sample size? Can we have a 1-byte sample? + if (chunkSize > 0) { + /* TODO: is there a minimum sample size? Can we have a 1-byte sample? */ fs.nbSamples += (int)((fileSize + chunkSize-1) / chunkSize); fs.totalSizeToLoad += fileSize; } @@ -350,7 +354,7 @@ int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, } /* Checks */ - if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer)) + if ((fs.nbSamples && !sampleSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */ if (fs.oneSampleTooLarge) { DISPLAYLEVEL(2, "! 
Warning : some sample(s) are very large \n"); @@ -379,7 +383,7 @@ int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel); - { size_t dictSize; + { size_t dictSize = ZSTD_error_GENERIC; if (params) { DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH); /* guard band, for end of buffer condition */ dictSize = ZDICT_trainFromBuffer_legacy(dictBuffer, maxDictSize, @@ -399,8 +403,7 @@ int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer, sampleSizes, nbSamplesLoaded, *coverParams); } - } else { - assert(fastCoverParams != NULL); + } else if (fastCoverParams != NULL) { if (optimize) { dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, maxDictSize, srcBuffer, sampleSizes, nbSamplesLoaded, @@ -415,6 +418,8 @@ int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, maxDictSize, srcBuffer, sampleSizes, nbSamplesLoaded, *fastCoverParams); } + } else { + assert(0 /* Impossible */); } if (ZDICT_isError(dictSize)) { DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */ diff --git a/programs/dibio.h b/programs/dibio.h index 666c1e66180..a96104c36d7 100644 --- a/programs/dibio.h +++ b/programs/dibio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/fileio.c b/programs/fileio.c index 5338fa62955..9a8300cdd83 100644 --- a/programs/fileio.c +++ b/programs/fileio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -28,6 +28,7 @@ #include /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */ #include /* malloc, free */ #include /* strcmp, strlen */ +#include /* clock_t, to measure process time */ #include /* O_WRONLY */ #include #include /* errno */ @@ -40,8 +41,12 @@ # include #endif -#include "../lib/common/mem.h" /* U32, U64 */ #include "fileio.h" +#include "fileio_asyncio.h" +#include "fileio_common.h" + +FIO_display_prefs_t g_display_prefs = {2, FIO_ps_auto}; +UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_magicNumber, ZSTD_frameHeaderSize_max */ #include "../lib/zstd.h" @@ -65,6 +70,40 @@ # include #endif +char const* FIO_zlibVersion(void) +{ +#if defined(ZSTD_GZCOMPRESS) || defined(ZSTD_GZDECOMPRESS) + return zlibVersion(); +#else + return "Unsupported"; +#endif +} + +char const* FIO_lz4Version(void) +{ +#if defined(ZSTD_LZ4COMPRESS) || defined(ZSTD_LZ4DECOMPRESS) + /* LZ4_versionString() added in v1.7.3 */ +# if LZ4_VERSION_NUMBER >= 10703 + return LZ4_versionString(); +# else +# define ZSTD_LZ4_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +# define ZSTD_LZ4_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LZ4_VERSION) + return ZSTD_LZ4_VERSION_STRING; +# endif +#else + return "Unsupported"; +#endif +} + +char const* FIO_lzmaVersion(void) +{ +#if defined(ZSTD_LZMACOMPRESS) || defined(ZSTD_LZMADECOMPRESS) + return lzma_version_string(); +#else + return "Unsupported"; +#endif +} + /*-************************************* * Constants @@ -75,69 +114,17 @@ #define FNSPACE 30 /* Default file permissions 0666 (modulated by umask) */ +/* Temporary restricted file permissions are used when we're going to + * chmod/chown at the end of the operation. */ #if !defined(_WIN32) /* These macros aren't defined on windows. */ #define DEFAULT_FILE_PERMISSIONS (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) +#define TEMPORARY_FILE_PERMISSIONS (S_IRUSR|S_IWUSR) #else #define DEFAULT_FILE_PERMISSIONS (0666) +#define TEMPORARY_FILE_PERMISSIONS (0600) #endif -/*-************************************* -* Macros -***************************************/ -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) -#undef MAX -#define MAX(a,b) ((a)>(b) ? (a) : (b)) - -struct FIO_display_prefs_s { - int displayLevel; /* 0 : no display; 1: errors; 2: + result + interaction + warnings; 3: + progression; 4: + information */ - FIO_progressSetting_e progressSetting; -}; - -static FIO_display_prefs_t g_display_prefs = {2, FIO_ps_auto}; - -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) { if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } } - -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; - -#define READY_FOR_UPDATE() ((g_display_prefs.progressSetting != FIO_ps_never) && UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) -#define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); } -#define DISPLAYUPDATE(l, ...) { \ - if (g_display_prefs.displayLevel>=l && (g_display_prefs.progressSetting != FIO_ps_never)) { \ - if (READY_FOR_UPDATE() || (g_display_prefs.displayLevel>=4)) { \ - DELAY_NEXT_UPDATE(); \ - DISPLAY(__VA_ARGS__); \ - if (g_display_prefs.displayLevel>=4) fflush(stderr); \ - } } } - -#undef MIN /* in case it would be already defined */ -#define MIN(a,b) ((a) < (b) ? 
(a) : (b)) - - -#define EXM_THROW(error, ...) \ -{ \ - DISPLAYLEVEL(1, "zstd: "); \ - DISPLAYLEVEL(5, "Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - exit(error); \ -} - -#define CHECK_V(v, f) \ - v = f; \ - if (ZSTD_isError(v)) { \ - DISPLAYLEVEL(5, "%s \n", #f); \ - EXM_THROW(11, "%s", ZSTD_getErrorName(v)); \ - } -#define CHECK(f) { size_t err; CHECK_V(err, f); } - - /*-************************************ * Signal (Ctrl-C trapping) **************************************/ @@ -237,7 +224,7 @@ static void ABRThandler(int sig) { } #endif -void FIO_addAbortHandler() +void FIO_addAbortHandler(void) { #if BACKTRACE_ENABLE signal(SIGABRT, ABRThandler); @@ -248,94 +235,6 @@ void FIO_addAbortHandler() #endif } - -/*-************************************************************ -* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW -***************************************************************/ -#if defined(_MSC_VER) && _MSC_VER >= 1400 -# define LONG_SEEK _fseeki64 -# define LONG_TELL _ftelli64 -#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ -# define LONG_SEEK fseeko -# define LONG_TELL ftello -#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) -# define LONG_SEEK fseeko64 -# define LONG_TELL ftello64 -#elif defined(_WIN32) && !defined(__DJGPP__) -# include - static int LONG_SEEK(FILE* file, __int64 offset, int origin) { - LARGE_INTEGER off; - DWORD method; - off.QuadPart = offset; - if (origin == SEEK_END) - method = FILE_END; - else if (origin == SEEK_CUR) - method = FILE_CURRENT; - else - method = FILE_BEGIN; - - if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method)) - return 0; - else - return -1; - } - static __int64 LONG_TELL(FILE* file) { - LARGE_INTEGER off, newOff; - off.QuadPart = 0; - newOff.QuadPart = 0; - SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, &newOff, FILE_CURRENT); - return newOff.QuadPart; - } -#else -# define LONG_SEEK fseek -# define LONG_TELL ftell -#endif - - -/*-************************************* -* Parameters: FIO_prefs_t -***************************************/ - -/* typedef'd to FIO_prefs_t within fileio.h */ -struct FIO_prefs_s { - - /* Algorithm preferences */ - FIO_compressionType_t compressionType; - U32 sparseFileSupport; /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */ - int dictIDFlag; - int checksumFlag; - int blockSize; - int overlapLog; - U32 adaptiveMode; - U32 useRowMatchFinder; - int rsyncable; - int minAdaptLevel; - int maxAdaptLevel; - int ldmFlag; - int ldmHashLog; - int ldmMinMatch; - int ldmBucketSizeLog; - int ldmHashRateLog; - size_t streamSrcSize; - size_t targetCBlockSize; - int srcSizeHint; - int testMode; - ZSTD_paramSwitch_e literalCompressionMode; - - /* IO preferences */ - U32 removeSrcFile; - U32 overwrite; - - /* Computation resources preferences */ - unsigned memLimit; - int nbWorkers; - - int excludeCompressedFiles; - int patchFromMode; - int contentSize; - int allowBlockDevices; -}; - /*-************************************* * Parameters: FIO_ctx_t ***************************************/ @@ -355,6 +254,18 @@ struct FIO_ctx_s { size_t totalBytesOutput; }; +static int FIO_shouldDisplayFileSummary(FIO_ctx_t const* fCtx) +{ + return fCtx->nbFilesTotal <= 1 || g_display_prefs.displayLevel >= 3; +} + +static int 
FIO_shouldDisplayMultipleFileSummary(FIO_ctx_t const* fCtx) +{ + int const shouldDisplay = (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1); + assert(shouldDisplay || FIO_shouldDisplayFileSummary(fCtx) || fCtx->nbFilesProcessed == 0); + return shouldDisplay; +} + /*-************************************* * Parameters: Initialization @@ -395,6 +306,8 @@ FIO_prefs_t* FIO_createPreferences(void) ret->literalCompressionMode = ZSTD_ps_auto; ret->excludeCompressedFiles = 0; ret->allowBlockDevices = 0; + ret->asyncIO = AIO_supported(); + ret->passThrough = -1; return ret; } @@ -443,13 +356,13 @@ void FIO_setCompressionType(FIO_prefs_t* const prefs, FIO_compressionType_t comp void FIO_overwriteMode(FIO_prefs_t* const prefs) { prefs->overwrite = 1; } -void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse) { prefs->sparseFileSupport = sparse; } +void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse) { prefs->sparseFileSupport = sparse; } void FIO_setDictIDFlag(FIO_prefs_t* const prefs, int dictIDFlag) { prefs->dictIDFlag = dictIDFlag; } void FIO_setChecksumFlag(FIO_prefs_t* const prefs, int checksumFlag) { prefs->checksumFlag = checksumFlag; } -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag) { prefs->removeSrcFile = (flag>0); } +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag) { prefs->removeSrcFile = (flag!=0); } void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit) { prefs->memLimit = memLimit; } @@ -476,7 +389,7 @@ void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog){ prefs->overlapLog = overlapLog; } -void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, unsigned adapt) { +void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt) { if ((adapt>0) && (prefs->nbWorkers==0)) EXM_THROW(1, "Adaptive mode is not compatible with single thread mode \n"); prefs->adaptiveMode = adapt; @@ -558,6 +471,20 @@ void FIO_setContentSize(FIO_prefs_t* const prefs, int value) prefs->contentSize = value != 0; } +void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value) { +#ifdef ZSTD_MULTITHREAD + prefs->asyncIO = value; +#else + (void) prefs; + (void) value; + DISPLAYLEVEL(2, "Note : asyncio is disabled (lack of multithreading support) \n"); +#endif +} + +void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value) { + prefs->passThrough = (value != 0); +} + /* FIO_ctx_t functions */ void FIO_setHasStdoutOutput(FIO_ctx_t* const fCtx, int value) { @@ -608,26 +535,26 @@ static int FIO_removeFile(const char* path) /** FIO_openSrcFile() : * condition : `srcFileName` must be non-NULL. `prefs` may be NULL. * @result : FILE* to `srcFileName`, or NULL if it fails */ -static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName) +static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName, stat_t* statbuf) { - stat_t statbuf; int allowBlockDevices = prefs != NULL ? 
prefs->allowBlockDevices : 0; assert(srcFileName != NULL); + assert(statbuf != NULL); if (!strcmp (srcFileName, stdinmark)) { DISPLAYLEVEL(4,"Using stdin for input \n"); SET_BINARY_MODE(stdin); return stdin; } - if (!UTIL_stat(srcFileName, &statbuf)) { + if (!UTIL_stat(srcFileName, statbuf)) { DISPLAYLEVEL(1, "zstd: can't stat %s : %s -- ignored \n", srcFileName, strerror(errno)); return NULL; } - if (!UTIL_isRegularFileStat(&statbuf) - && !UTIL_isFIFOStat(&statbuf) - && !(allowBlockDevices && UTIL_isBlockDevStat(&statbuf)) + if (!UTIL_isRegularFileStat(statbuf) + && !UTIL_isFIFOStat(statbuf) + && !(allowBlockDevices && UTIL_isBlockDevStat(statbuf)) ) { DISPLAYLEVEL(1, "zstd: %s is not a regular file -- ignored \n", srcFileName); @@ -685,7 +612,7 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, if (!prefs->overwrite) { if (g_display_prefs.displayLevel <= 1) { /* No interaction possible */ - DISPLAY("zstd: %s already exists; not overwritten \n", + DISPLAYLEVEL(1, "zstd: %s already exists; not overwritten \n", dstFileName); return NULL; } @@ -718,6 +645,17 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, if (f == NULL) { DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno)); } + /* An increased buffer size can provide a significant performance boost on some platforms. + * Note that providing a NULL buf with a size that's not 0 is not defined in ANSI C, but is defined + * in an extension. There are three possibilities here - + * 1. Libc supports the extended version and everything is good. + * 2. Libc ignores the size when buf is NULL, in which case everything will continue as if we didn't + * call `setvbuf`. + * 3. We fail the call and execution continues but a warning message might be shown. + * In all cases due execution continues. For now, I believe that this is a more cost-effective + * solution than managing the buffers allocations ourselves (will require an API change). */ + if(setvbuf(f, NULL, _IOFBF, 1 MB)) + DISPLAYLEVEL(2, "Warning: setvbuf failed for %s\n", dstFileName); return f; } } @@ -728,23 +666,23 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, * @return : loaded size * if fileName==NULL, returns 0 and a NULL pointer */ -static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName, FIO_prefs_t* const prefs) +static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) { FILE* fileHandle; U64 fileSize; - stat_t statbuf; assert(bufferPtr != NULL); + assert(dictFileStat != NULL); *bufferPtr = NULL; if (fileName == NULL) return 0; DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); - if (!UTIL_stat(fileName, &statbuf)) { + if (!UTIL_stat(fileName, dictFileStat)) { EXM_THROW(31, "Stat failed on dictionary file %s: %s", fileName, strerror(errno)); } - if (!UTIL_isRegularFileStat(&statbuf)) { + if (!UTIL_isRegularFileStat(dictFileStat)) { EXM_THROW(32, "Dictionary %s must be a regular file.", fileName); } @@ -754,7 +692,7 @@ static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName, FIO_p EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); } - fileSize = UTIL_getFileSizeStat(&statbuf); + fileSize = UTIL_getFileSizeStat(dictFileStat); { size_t const dictSizeMax = prefs->patchFromMode ? 
prefs->memLimit : DICTSIZE_MAX; if (fileSize > dictSizeMax) { @@ -785,7 +723,7 @@ int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles) { filenameTableSorted = (const char**) malloc(sizeof(char*) * nbFiles); if (!filenameTableSorted) { - DISPLAY("Unable to malloc new str array, not checking for name collisions\n"); + DISPLAYLEVEL(1, "Allocation error during filename collision checking \n"); return 1; } @@ -802,7 +740,7 @@ int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles) { prevElem = filenameTableSorted[0]; for (u = 1; u < nbFiles; ++u) { if (strcmp(prevElem, filenameTableSorted[u]) == 0) { - DISPLAY("WARNING: Two files have same filename: %s\n", prevElem); + DISPLAYLEVEL(2, "WARNING: Two files have same filename: %s\n", prevElem); } prevElem = filenameTableSorted[u]; } @@ -885,45 +823,89 @@ static void FIO_adjustMemLimitForPatchFromMode(FIO_prefs_t* const prefs, FIO_setMemLimit(prefs, (unsigned)maxSize); } -/* FIO_removeMultiFilesWarning() : +/* FIO_multiFilesConcatWarning() : + * This function handles logic when processing multiple files with -o or -c, displaying the appropriate warnings/prompts. * Returns 1 if the console should abort, 0 if console should proceed. - * This function handles logic when processing multiple files with -o, displaying the appropriate warnings/prompts. * - * If -f is specified, or there is just 1 file, zstd will always proceed as usual. - * If --rm is specified, there will be a prompt asking for user confirmation. - * If -f is specified with --rm, zstd will proceed as usual - * If -q is specified with --rm, zstd will abort pre-emptively - * If neither flag is specified, zstd will prompt the user for confirmation to proceed. - * If --rm is not specified, then zstd will print a warning to the user (which can be silenced with -q). - * However, if the output is stdout, we will always abort rather than displaying the warning prompt. + * If output is stdout or test mode is active, check that `--rm` disabled. + * + * If there is just 1 file to process, zstd will proceed as usual. + * If each file get processed into its own separate destination file, proceed as usual. + * + * When multiple files are processed into a single output, + * display a warning message, then disable --rm if it's set. + * + * If -f is specified or if output is stdout, just proceed. + * If output is set with -o, prompt for confirmation. */ -static int FIO_removeMultiFilesWarning(FIO_ctx_t* const fCtx, const FIO_prefs_t* const prefs, const char* outFileName, int displayLevelCutoff) +static int FIO_multiFilesConcatWarning(const FIO_ctx_t* fCtx, FIO_prefs_t* prefs, const char* outFileName, int displayLevelCutoff) { - int error = 0; - if (fCtx->nbFilesTotal > 1 && !prefs->overwrite) { - if (g_display_prefs.displayLevel <= displayLevelCutoff) { - if (prefs->removeSrcFile) { - DISPLAYLEVEL(1, "zstd: Aborting... not deleting files and processing into dst: %s\n", outFileName); - error = 1; - } - } else { - if (!strcmp(outFileName, stdoutmark)) { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); - } else { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); - } - DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate the original directory tree. \n") - if (prefs->removeSrcFile) { - if (fCtx->hasStdoutOutput) { - DISPLAYLEVEL(1, "Aborting. 
Use -f if you really want to delete the files and output to stdout\n"); - error = 1; - } else { - error = g_display_prefs.displayLevel > displayLevelCutoff && UTIL_requireUserConfirmation("This is a destructive operation. Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); - } - } - } + if (fCtx->hasStdoutOutput) { + if (prefs->removeSrcFile) + /* this should not happen ; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happen */ + EXM_THROW(43, "It's not allowed to remove input files when processed output is piped to stdout. " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + } + if (prefs->testMode) { + if (prefs->removeSrcFile) + /* this should not happen ; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happen */ + EXM_THROW(43, "Test mode shall not remove input files! " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + return 0; } - return error; + + if (fCtx->nbFilesTotal == 1) return 0; + assert(fCtx->nbFilesTotal > 1); + + if (!outFileName) return 0; + + if (fCtx->hasStdoutOutput) { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); + } else { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); + } + DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate original file names nor directory structure. \n") + + /* multi-input into single output : --rm is not allowed */ + if (prefs->removeSrcFile) { + DISPLAYLEVEL(2, "Since it's a destructive operation, input files will not be removed. \n"); + prefs->removeSrcFile = 0; + } + + if (fCtx->hasStdoutOutput) return 0; + if (prefs->overwrite) return 0; + + /* multiple files concatenated into single destination file using -o without -f */ + if (g_display_prefs.displayLevel <= displayLevelCutoff) { + /* quiet mode => no prompt => fail automatically */ + DISPLAYLEVEL(1, "Concatenating multiple processed inputs into a single output loses file metadata. \n"); + DISPLAYLEVEL(1, "Aborting. \n"); + return 1; + } + /* normal mode => prompt */ + return UTIL_requireUserConfirmation("Proceed? 
(y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); +} + +static ZSTD_inBuffer setInBuffer(const void* buf, size_t s, size_t pos) +{ + ZSTD_inBuffer i; + i.src = buf; + i.size = s; + i.pos = pos; + return i; +} + +static ZSTD_outBuffer setOutBuffer(void* buf, size_t s, size_t pos) +{ + ZSTD_outBuffer o; + o.dst = buf; + o.size = s; + o.pos = pos; + return o; } #ifndef ZSTD_NOCOMPRESS @@ -932,16 +914,13 @@ static int FIO_removeMultiFilesWarning(FIO_ctx_t* const fCtx, const FIO_prefs_t* * Compression ************************************************************************/ typedef struct { - FILE* srcFile; - FILE* dstFile; - void* srcBuffer; - size_t srcBufferSize; - void* dstBuffer; - size_t dstBufferSize; void* dictBuffer; size_t dictBufferSize; const char* dictFileName; + stat_t dictFileStat; ZSTD_CStream* cctx; + WritePoolCtx_t *writeCtx; + ReadPoolCtx_t *readCtx; } cRess_t; /** ZSTD_cycleLog() : @@ -973,8 +952,8 @@ static void FIO_adjustParamsForPatchFromMode(FIO_prefs_t* const prefs, if (cParams.strategy >= ZSTD_btopt) { DISPLAYLEVEL(1, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n"); DISPLAYLEVEL(1, "- Use --single-thread mode in the zstd cli\n"); - DISPLAYLEVEL(1, "- Set a larger targetLength (eg. --zstd=targetLength=4096)\n"); - DISPLAYLEVEL(1, "- Set a larger chainLog (eg. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); + DISPLAYLEVEL(1, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n"); + DISPLAYLEVEL(1, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); DISPLAYLEVEL(1, "Also consider playing around with searchLog and hashLog\n"); } } @@ -990,9 +969,6 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, if (ress.cctx == NULL) EXM_THROW(30, "allocation error (%s): can't create ZSTD_CCtx", strerror(errno)); - ress.srcBufferSize = ZSTD_CStreamInSize(); - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = ZSTD_CStreamOutSize(); /* need to update memLimit before calling createDictBuffer * because of memLimit check inside it */ @@ -1000,10 +976,10 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, unsigned long long const ssSize = (unsigned long long)prefs->streamSrcSize; FIO_adjustParamsForPatchFromMode(prefs, &comprParams, UTIL_getFileSize(dictFileName), ssSize > 0 ? 
ssSize : maxSrcFileSize, cLevel); } - ress.dstBuffer = malloc(ress.dstBufferSize); - ress.dictBufferSize = FIO_createDictBuffer(&ress.dictBuffer, dictFileName, prefs); /* works with dictFileName==NULL */ - if (!ress.srcBuffer || !ress.dstBuffer) - EXM_THROW(31, "allocation error : not enough memory"); + ress.dictBufferSize = FIO_createDictBuffer(&ress.dictBuffer, dictFileName, prefs, &ress.dictFileStat); /* works with dictFileName==NULL */ + + ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_CStreamOutSize()); + ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_CStreamInSize()); /* Advanced parameters, including dictionary */ if (dictFileName && (ress.dictBuffer==NULL)) @@ -1066,9 +1042,9 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, static void FIO_freeCResources(const cRess_t* const ress) { - free(ress->srcBuffer); - free(ress->dstBuffer); free(ress->dictBuffer); + AIO_WritePool_free(ress->writeCtx); + AIO_ReadPool_free(ress->readCtx); ZSTD_freeCStream(ress->cctx); /* never fails */ } @@ -1081,6 +1057,7 @@ FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but no { unsigned long long inFileSize = 0, outFileSize = 0; z_stream strm; + IOJob_t *writeJob = NULL; if (compressionLevel > Z_BEST_COMPRESSION) compressionLevel = Z_BEST_COMPRESSION; @@ -1091,56 +1068,65 @@ FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but no { int const ret = deflateInit2(&strm, compressionLevel, Z_DEFLATED, 15 /* maxWindowLogSize */ + 16 /* gzip only */, - 8, Z_DEFAULT_STRATEGY); /* see http://www.zlib.net/manual.html */ + 8, Z_DEFAULT_STRATEGY); /* see https://www.zlib.net/manual.html */ if (ret != Z_OK) { EXM_THROW(71, "zstd: %s: deflateInit2 error %d \n", srcFileName, ret); } } + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; while (1) { int ret; if (strm.avail_in == 0) { - size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile); - if (inSize == 0) break; - inFileSize += inSize; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; - strm.avail_in = (uInt)inSize; + AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_CStreamInSize()); + if (ress->readCtx->srcBufferLoaded == 0) break; + inFileSize += ress->readCtx->srcBufferLoaded; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; + } + + { + size_t const availBefore = strm.avail_in; + ret = deflate(&strm, Z_NO_FLUSH); + AIO_ReadPool_consumeBytes(ress->readCtx, availBefore - strm.avail_in); } - ret = deflate(&strm, Z_NO_FLUSH); + if (ret != Z_OK) EXM_THROW(72, "zstd: %s: deflate error %d \n", srcFileName, ret); - { size_t const cSize = ress->dstBufferSize - strm.avail_out; + { size_t const cSize = writeJob->bufferSize - strm.avail_out; if (cSize) { - if (fwrite(ress->dstBuffer, 1, cSize, ress->dstFile) != cSize) - EXM_THROW(73, "Write error : cannot write to output file : %s ", strerror(errno)); + writeJob->usedBufferSize = cSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += cSize; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - } } + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%% 
", - (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + DISPLAYUPDATE_PROGRESS( + "\rRead : %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), + (double)outFileSize/(double)inFileSize*100) } else { - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%% ", - (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + DISPLAYUPDATE_PROGRESS( + "\rRead : %u / %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), + (double)outFileSize/(double)inFileSize*100); } } while (1) { int const ret = deflate(&strm, Z_FINISH); - { size_t const cSize = ress->dstBufferSize - strm.avail_out; + { size_t const cSize = writeJob->bufferSize - strm.avail_out; if (cSize) { - if (fwrite(ress->dstBuffer, 1, cSize, ress->dstFile) != cSize) - EXM_THROW(75, "Write error : %s ", strerror(errno)); + writeJob->usedBufferSize = cSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += cSize; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - } } + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + } } if (ret == Z_STREAM_END) break; if (ret != Z_BUF_ERROR) EXM_THROW(77, "zstd: %s: deflate error %d \n", srcFileName, ret); @@ -1151,6 +1137,8 @@ FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but no EXM_THROW(79, "zstd: %s: deflateEnd error %d \n", srcFileName, ret); } } *readsize = inFileSize; + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return outFileSize; } #endif @@ -1166,6 +1154,7 @@ FIO_compressLzmaFrame(cRess_t* ress, lzma_stream strm = LZMA_STREAM_INIT; lzma_action action = LZMA_RUN; lzma_ret ret; + IOJob_t *writeJob = NULL; if (compressionLevel < 0) compressionLevel = 0; if (compressionLevel > 9) compressionLevel = 9; @@ -1183,46 +1172,55 @@ FIO_compressLzmaFrame(cRess_t* ress, EXM_THROW(83, "zstd: %s: lzma_easy_encoder error %d", srcFileName, ret); } + writeJob =AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; while (1) { if (strm.avail_in == 0) { - size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile); - if (inSize == 0) action = LZMA_FINISH; + size_t const inSize = AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_CStreamInSize()); + if (ress->readCtx->srcBufferLoaded == 0) action = LZMA_FINISH; inFileSize += inSize; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = inSize; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; + } + + { + size_t const availBefore = strm.avail_in; + ret = lzma_code(&strm, action); + AIO_ReadPool_consumeBytes(ress->readCtx, availBefore - strm.avail_in); } - ret = lzma_code(&strm, action); if (ret != LZMA_OK && ret != LZMA_STREAM_END) EXM_THROW(84, "zstd: %s: lzma_code encoding error %d", srcFileName, ret); - { size_t const compBytes = ress->dstBufferSize - strm.avail_out; + { size_t const compBytes = writeJob->bufferSize - strm.avail_out; if (compBytes) { - if (fwrite(ress->dstBuffer, 1, compBytes, ress->dstFile) != compBytes) - EXM_THROW(85, "Write error : %s", strerror(errno)); + writeJob->usedBufferSize = compBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += compBytes; - strm.next_out = (BYTE*)ress->dstBuffer; - 
strm.avail_out = ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) else - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); if (ret == LZMA_STREAM_END) break; } lzma_end(&strm); *readsize = inFileSize; + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); + return outFileSize; } #endif @@ -1248,15 +1246,18 @@ FIO_compressLz4Frame(cRess_t* ress, LZ4F_preferences_t prefs; LZ4F_compressionContext_t ctx; + IOJob_t* writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + LZ4F_errorCode_t const errorCode = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION); if (LZ4F_isError(errorCode)) EXM_THROW(31, "zstd: failed to create lz4 compression context"); memset(&prefs, 0, sizeof(prefs)); - assert(blockSize <= ress->srcBufferSize); + assert(blockSize <= ress->readCtx->base.jobBufferSize); - prefs.autoFlush = 1; + /* autoflush off to mitigate a bug in lz4<=1.9.3 for compression level 12 */ + prefs.autoFlush = 0; prefs.compressionLevel = compressionLevel; prefs.frameInfo.blockMode = LZ4F_blockLinked; prefs.frameInfo.blockSizeID = LZ4F_max64KB; @@ -1264,75 +1265,68 @@ FIO_compressLz4Frame(cRess_t* ress, #if LZ4_VERSION_NUMBER >= 10600 prefs.frameInfo.contentSize = (srcFileSize==UTIL_FILESIZE_UNKNOWN) ? 0 : srcFileSize; #endif - assert(LZ4F_compressBound(blockSize, &prefs) <= ress->dstBufferSize); + assert(LZ4F_compressBound(blockSize, &prefs) <= writeJob->bufferSize); { - size_t readSize; - size_t headerSize = LZ4F_compressBegin(ctx, ress->dstBuffer, ress->dstBufferSize, &prefs); + size_t headerSize = LZ4F_compressBegin(ctx, writeJob->buffer, writeJob->bufferSize, &prefs); if (LZ4F_isError(headerSize)) EXM_THROW(33, "File header generation failed : %s", LZ4F_getErrorName(headerSize)); - if (fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile) != headerSize) - EXM_THROW(34, "Write error : %s (cannot write header)", strerror(errno)); + writeJob->usedBufferSize = headerSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += headerSize; /* Read first block */ - readSize = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile); - inFileSize += readSize; + inFileSize += AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); /* Main Loop */ - while (readSize>0) { - size_t const outSize = LZ4F_compressUpdate(ctx, - ress->dstBuffer, ress->dstBufferSize, - ress->srcBuffer, readSize, NULL); + while (ress->readCtx->srcBufferLoaded) { + size_t inSize = MIN(blockSize, ress->readCtx->srcBufferLoaded); + size_t const outSize = LZ4F_compressUpdate(ctx, writeJob->buffer, writeJob->bufferSize, + ress->readCtx->srcBuffer, inSize, NULL); if (LZ4F_isError(outSize)) EXM_THROW(35, "zstd: %s: lz4 compression failed : %s", srcFileName, LZ4F_getErrorName(outSize)); outFileSize += outSize; if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) } else { - 
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); } /* Write Block */ - { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, outSize, ress->dstFile); - if (sizeCheck != outSize) - EXM_THROW(36, "Write error : %s", strerror(errno)); - } + writeJob->usedBufferSize = outSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); /* Read next block */ - readSize = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile); - inFileSize += readSize; + AIO_ReadPool_consumeBytes(ress->readCtx, inSize); + inFileSize += AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); } - if (ferror(ress->srcFile)) EXM_THROW(37, "Error reading %s ", srcFileName); /* End of Stream mark */ - headerSize = LZ4F_compressEnd(ctx, ress->dstBuffer, ress->dstBufferSize, NULL); + headerSize = LZ4F_compressEnd(ctx, writeJob->buffer, writeJob->bufferSize, NULL); if (LZ4F_isError(headerSize)) EXM_THROW(38, "zstd: %s: lz4 end of file generation failed : %s", srcFileName, LZ4F_getErrorName(headerSize)); - { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile); - if (sizeCheck != headerSize) - EXM_THROW(39, "Write error : %s (cannot write end of stream)", - strerror(errno)); - } + writeJob->usedBufferSize = headerSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += headerSize; } *readsize = inFileSize; LZ4F_freeCompressionContext(ctx); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return outFileSize; } #endif - static unsigned long long FIO_compressZstdFrame(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, @@ -1341,8 +1335,8 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, int compressionLevel, U64* readsize) { cRess_t const ress = *ressPtr; - FILE* const srcFile = ress.srcFile; - FILE* const dstFile = ress.dstFile; + IOJob_t *writeJob = AIO_WritePool_acquireJob(ressPtr->writeCtx); + U64 compressedfilesize = 0; ZSTD_EndDirective directive = ZSTD_e_continue; U64 pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; @@ -1356,6 +1350,9 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, unsigned inputPresented = 0; unsigned inputBlocked = 0; unsigned lastJobID = 0; + UTIL_time_t lastAdaptTime = UTIL_getTime(); + U64 const adaptEveryMicro = REFRESH_RATE; + UTIL_HumanReadableSize_t const file_hrs = UTIL_makeHumanReadableSize(fileSize); DISPLAYLEVEL(6, "compression using zstd format \n"); @@ -1375,8 +1372,13 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, UTIL_HumanReadableSize_t windowSize; CHECK(ZSTD_CCtx_getParameter(ress.cctx, ZSTD_c_windowLog, &windowLog)); if (windowLog == 0) { - const ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, fileSize, 0); - windowLog = cParams.windowLog; + if (prefs->ldmFlag) { + /* If long mode is set without a window size libzstd will set this size internally */ + windowLog = ZSTD_WINDOWLOG_LIMIT_DEFAULT; + } else { + const ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, fileSize, 0); + windowLog = (int)cParams.windowLog; + } } windowSize = UTIL_makeHumanReadableSize(MAX(1ULL, MIN(1ULL << windowLog, pledgedSrcSize))); DISPLAYLEVEL(4, "Decompression will require %.*f%s of memory\n", windowSize.precision, windowSize.value, windowSize.suffix); @@ -1387,12 +1389,12 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, do { size_t stillToFlush; /* Fill input Buffer */ - size_t const 
inSize = fread(ress.srcBuffer, (size_t)1, ress.srcBufferSize, srcFile); - ZSTD_inBuffer inBuff = { ress.srcBuffer, inSize, 0 }; + size_t const inSize = AIO_ReadPool_fillBuffer(ress.readCtx, ZSTD_CStreamInSize()); + ZSTD_inBuffer inBuff = setInBuffer( ress.readCtx->srcBuffer, ress.readCtx->srcBufferLoaded, 0 ); DISPLAYLEVEL(6, "fread %u bytes from source \n", (unsigned)inSize); *readsize += inSize; - if ((inSize == 0) || (*readsize == fileSize)) + if ((ress.readCtx->srcBufferLoaded == 0) || (*readsize == fileSize)) directive = ZSTD_e_end; stillToFlush = 1; @@ -1400,9 +1402,10 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, || (directive == ZSTD_e_end && stillToFlush != 0) ) { size_t const oldIPos = inBuff.pos; - ZSTD_outBuffer outBuff = { ress.dstBuffer, ress.dstBufferSize, 0 }; + ZSTD_outBuffer outBuff = setOutBuffer( writeJob->buffer, writeJob->bufferSize, 0 ); size_t const toFlushNow = ZSTD_toFlushNow(ress.cctx); CHECK_V(stillToFlush, ZSTD_compressStream2(ress.cctx, &outBuff, &inBuff, directive)); + AIO_ReadPool_consumeBytes(ress.readCtx, inBuff.pos - oldIPos); /* count stats */ inputPresented++; @@ -1411,151 +1414,155 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, /* Write compressed stream */ DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n", - (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); + (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); if (outBuff.pos) { - size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile); - if (sizeCheck != outBuff.pos) - EXM_THROW(25, "Write error : %s (cannot write compressed block)", - strerror(errno)); + writeJob->usedBufferSize = outBuff.pos; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); compressedfilesize += outBuff.pos; } - /* display notification; and adapt compression level */ - if (READY_FOR_UPDATE()) { + /* adaptive mode : statistics measurement and speed correction */ + if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) { + ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); + + lastAdaptTime = UTIL_getTime(); + + /* check output speed */ + if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ + + unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; + assert(zfp.produced >= previous_zfp_update.produced); + assert(prefs->nbWorkers >= 1); + + /* test if compression is blocked + * either because output is slow and all buffers are full + * or because input is slow and no job can start while waiting for at least one buffer to be filled. 
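
The comment above describes how --adapt decides, from ZSTD_getFrameProgression() counter deltas, whether the pipeline is stalled on output (compress harder) or starved on input (compress lighter). The following is only a minimal sketch of that feedback idea; the name adaptLevel and the 1/8 and 1/32 style margins are illustrative assumptions, while ZSTD_CCtx_setParameter, ZSTD_c_compressionLevel, ZSTD_minCLevel and ZSTD_maxCLevel are regular libzstd API. The actual CLI additionally tracks worker activity, a warm-up period, and the user's --adapt bounds.

#include <zstd.h>

/* Illustrative only: nudge the compression level from producer/consumer deltas
 * sampled between two progression snapshots. A higher level is slower but denser. */
static int adaptLevel(ZSTD_CCtx* cctx, int level,
                      unsigned long long newlyProduced, unsigned long long newlyFlushed,
                      unsigned long long newlyIngested, unsigned long long newlyConsumed)
{
    if (newlyProduced > newlyFlushed + newlyFlushed / 8) {
        /* output cannot drain what compression produces => raise the level */
        if (level < ZSTD_maxCLevel()) level++;
    } else if (newlyIngested > newlyConsumed + newlyConsumed / 32) {
        /* input arrives faster than compression consumes it => lower the level */
        if (level > ZSTD_minCLevel()) level--;
    }
    if (level == 0) level = 1;  /* 0 means "default", never target it directly */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    return level;
}
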
+ * note : exclude starting part, since currentJobID > 1 */ + if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ + && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ + ) { + DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") + speedChange = slower; + } + + previous_zfp_update = zfp; + + if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ + && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ + ) { + DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); + speedChange = slower; + } + flushWaiting = 0; + } + + /* course correct only if there is at least one new job completed */ + if (zfp.currentJobID > lastJobID) { + DISPLAYLEVEL(6, "compression level adaptation check \n") + + /* check input speed */ + if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ + if (inputBlocked <= 0) { + DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); + speedChange = slower; + } else if (speedChange == noChange) { + unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; + unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; + unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; + previous_zfp_correction = zfp; + assert(inputPresented > 0); + DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", + inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, + (unsigned)newlyIngested, (unsigned)newlyConsumed, + (unsigned)newlyFlushed, (unsigned)newlyProduced); + if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */ + && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ + && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ + ) { + DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", + newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); + speedChange = faster; + } + } + inputBlocked = 0; + inputPresented = 0; + } + + if (speedChange == slower) { + DISPLAYLEVEL(6, "slower speed , higher compression \n") + compressionLevel ++; + if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); + if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; + compressionLevel += (compressionLevel == 0); /* skip 0 */ + ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); + } + if (speedChange == faster) { + DISPLAYLEVEL(6, "faster speed , lighter compression \n") + compressionLevel --; + if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; + compressionLevel -= (compressionLevel == 0); /* skip 0 */ + ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); + } + speedChange = noChange; + + lastJobID = 
zfp.currentJobID; + } /* if (zfp.currentJobID > lastJobID) */ + } /* if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) */ + + /* display notification */ + if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) { ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); double const cShare = (double)zfp.produced / (double)(zfp.consumed + !zfp.consumed/*avoid div0*/) * 100; UTIL_HumanReadableSize_t const buffered_hrs = UTIL_makeHumanReadableSize(zfp.ingested - zfp.consumed); UTIL_HumanReadableSize_t const consumed_hrs = UTIL_makeHumanReadableSize(zfp.consumed); UTIL_HumanReadableSize_t const produced_hrs = UTIL_makeHumanReadableSize(zfp.produced); + DELAY_NEXT_UPDATE(); + /* display progress notifications */ + DISPLAY_PROGRESS("\r%79s\r", ""); /* Clear out the current displayed line */ if (g_display_prefs.displayLevel >= 3) { - DISPLAYUPDATE(3, "\r(L%i) Buffered :%6.*f%4s - Consumed :%6.*f%4s - Compressed :%6.*f%4s => %.2f%% ", - compressionLevel, - buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, - consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, - produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, - cShare ); - } else if (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) { + /* Verbose progress update */ + DISPLAY_PROGRESS( + "(L%i) Buffered:%5.*f%s - Consumed:%5.*f%s - Compressed:%5.*f%s => %.2f%% ", + compressionLevel, + buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, + consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, + produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, + cShare ); + } else { /* Require level 2 or forcibly displayed progress counter for summarized updates */ - DISPLAYLEVEL(1, "\r%79s\r", ""); /* Clear out the current displayed line */ if (fCtx->nbFilesTotal > 1) { size_t srcFileNameSize = strlen(srcFileName); /* Ensure that the string we print is roughly the same size each time */ if (srcFileNameSize > 18) { const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYLEVEL(1, "Compress: %u/%u files. Current: ...%s ", + DISPLAY_PROGRESS("Compress: %u/%u files. Current: ...%s ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName); } else { - DISPLAYLEVEL(1, "Compress: %u/%u files. Current: %*s ", + DISPLAY_PROGRESS("Compress: %u/%u files. 
Current: %*s ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, (int)(18-srcFileNameSize), srcFileName); } } - DISPLAYLEVEL(1, "Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); + DISPLAY_PROGRESS("Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); if (fileSize != UTIL_FILESIZE_UNKNOWN) - DISPLAYLEVEL(2, "/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); - DISPLAYLEVEL(1, " ==> %2.f%%", cShare); - DELAY_NEXT_UPDATE(); + DISPLAY_PROGRESS("/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); + DISPLAY_PROGRESS(" ==> %2.f%%", cShare); } - - /* adaptive mode : statistics measurement and speed correction */ - if (prefs->adaptiveMode) { - - /* check output speed */ - if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ - - unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; - assert(zfp.produced >= previous_zfp_update.produced); - assert(prefs->nbWorkers >= 1); - - /* test if compression is blocked - * either because output is slow and all buffers are full - * or because input is slow and no job can start while waiting for at least one buffer to be filled. - * note : exclude starting part, since currentJobID > 1 */ - if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ - && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ - ) { - DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") - speedChange = slower; - } - - previous_zfp_update = zfp; - - if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ - && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ - ) { - DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); - speedChange = slower; - } - flushWaiting = 0; - } - - /* course correct only if there is at least one new job completed */ - if (zfp.currentJobID > lastJobID) { - DISPLAYLEVEL(6, "compression level adaptation check \n") - - /* check input speed */ - if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ - if (inputBlocked <= 0) { - DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); - speedChange = slower; - } else if (speedChange == noChange) { - unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; - unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; - unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; - previous_zfp_correction = zfp; - assert(inputPresented > 0); - DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", - inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, - (unsigned)newlyIngested, (unsigned)newlyConsumed, - (unsigned)newlyFlushed, (unsigned)newlyProduced); - if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full 
: compression or output too slow */ - && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ - && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ - ) { - DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", - newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); - speedChange = faster; - } - } - inputBlocked = 0; - inputPresented = 0; - } - - if (speedChange == slower) { - DISPLAYLEVEL(6, "slower speed , higher compression \n") - compressionLevel ++; - if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); - if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; - compressionLevel += (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - if (speedChange == faster) { - DISPLAYLEVEL(6, "faster speed , lighter compression \n") - compressionLevel --; - if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; - compressionLevel -= (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - speedChange = noChange; - - lastJobID = zfp.currentJobID; - } /* if (zfp.currentJobID > lastJobID) */ - } /* if (g_adaptiveMode) */ - } /* if (READY_FOR_UPDATE()) */ + } /* if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) */ } /* while ((inBuff.pos != inBuff.size) */ } while (directive != ZSTD_e_end); - if (ferror(srcFile)) { - EXM_THROW(26, "Read error : I/O error"); - } if (fileSize != UTIL_FILESIZE_UNKNOWN && *readsize != fileSize) { EXM_THROW(27, "Read error : Incomplete read : %llu / %llu B", (unsigned long long)*readsize, (unsigned long long)fileSize); } + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ressPtr->writeCtx); + return compressedfilesize; } @@ -1620,20 +1627,18 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, /* Status */ fCtx->totalBytesInput += (size_t)readsize; fCtx->totalBytesOutput += (size_t)compressedfilesize; - DISPLAYLEVEL(2, "\r%79s\r", ""); - if (g_display_prefs.displayLevel >= 2 && - !fCtx->hasStdoutOutput && - (g_display_prefs.displayLevel >= 3 || fCtx->nbFilesTotal <= 1)) { + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) { UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) readsize); UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) compressedfilesize); if (readsize == 0) { - DISPLAYLEVEL(2,"%-20s : (%6.*f%4s => %6.*f%4s, %s) \n", + DISPLAY_SUMMARY("%-20s : (%6.*f%s => %6.*f%s, %s) \n", srcFileName, hr_isize.precision, hr_isize.value, hr_isize.suffix, hr_osize.precision, hr_osize.value, hr_osize.suffix, dstFileName); } else { - DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6.*f%4s => %6.*f%4s, %s) \n", + DISPLAY_SUMMARY("%-20s :%6.2f%% (%6.*f%s => %6.*f%s, %s) \n", srcFileName, (double)compressedfilesize / (double)readsize * 100, hr_isize.precision, hr_isize.value, hr_isize.suffix, @@ -1656,7 +1661,7 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, /*! FIO_compressFilename_dstFile() : - * open dstFileName, or pass-through if ress.dstFile != NULL, + * open dstFileName, or pass-through if ress.file != NULL, * then start compression with FIO_compressFilename_internal(). * Manages source removal (--rm) and file permissions transfer. 
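
The per-file summary above only prints a ratio when at least one input byte was read, because an empty input would otherwise divide by zero; the multi-file totals added further down apply the same guard. A minimal sketch of that guard, using plain printf instead of the CLI's human-readable-size helpers (printSummary is a hypothetical name):

#include <stdio.h>

/* Print "input => output" plus a ratio, skipping the ratio for empty inputs
 * to avoid dividing by zero. Illustrative only. */
static void printSummary(const char* name, unsigned long long inBytes, unsigned long long outBytes)
{
    if (inBytes == 0) {
        printf("%-20s : (%llu B => %llu B)\n", name, inBytes, outBytes);
    } else {
        printf("%-20s : %6.2f%% (%llu B => %llu B)\n",
               name, (double)outBytes / (double)inBytes * 100.0, inBytes, outBytes);
    }
}
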
* note : ress.srcFile must be != NULL, @@ -1669,27 +1674,29 @@ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, cRess_t ress, const char* dstFileName, const char* srcFileName, + const stat_t* srcFileStat, int compressionLevel) { int closeDstFile = 0; int result; - stat_t statbuf; - int transferMTime = 0; - assert(ress.srcFile != NULL); - if (ress.dstFile == NULL) { - int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; + int transferStat = 0; + FILE *dstFile; + + assert(AIO_ReadPool_getFile(ress.readCtx) != NULL); + if (AIO_WritePool_getFile(ress.writeCtx) == NULL) { + int dstFileInitialPermissions = DEFAULT_FILE_PERMISSIONS; if ( strcmp (srcFileName, stdinmark) && strcmp (dstFileName, stdoutmark) - && UTIL_stat(srcFileName, &statbuf) - && UTIL_isRegularFileStat(&statbuf) ) { - dstFilePermissions = statbuf.st_mode; - transferMTime = 1; + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFileInitialPermissions = TEMPORARY_FILE_PERMISSIONS; } closeDstFile = 1; DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: opening dst: %s \n", dstFileName); - ress.dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); - if (ress.dstFile==NULL) return 1; /* could not open dstFileName */ + dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFileInitialPermissions); + if (dstFile==NULL) return 1; /* could not open dstFileName */ + AIO_WritePool_setFile(ress.writeCtx, dstFile); /* Must only be added after FIO_openDstFile() succeeds. * Otherwise we may delete the destination file if it already exists, * and the user presses Ctrl-C when asked if they wish to overwrite. @@ -1700,18 +1707,15 @@ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, result = FIO_compressFilename_internal(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); if (closeDstFile) { - FILE* const dstFile = ress.dstFile; - ress.dstFile = NULL; - clearHandler(); DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: closing dst: %s \n", dstFileName); - if (fclose(dstFile)) { /* error closing dstFile */ + if (AIO_WritePool_closeFile(ress.writeCtx)) { /* error closing file */ DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result=1; } - if (transferMTime) { - UTIL_utime(dstFileName, &statbuf); + if (transferStat) { + UTIL_setFileStat(dstFileName, srcFileStat); } if ( (result != 0) /* operation failure */ && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ @@ -1752,18 +1756,27 @@ FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, int compressionLevel) { int result; + FILE* srcFile; + stat_t srcFileStat; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; DISPLAYLEVEL(6, "FIO_compressFilename_srcFile: %s \n", srcFileName); - /* ensure src is not a directory */ - if (UTIL_isDirectory(srcFileName)) { - DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); - return 1; - } + if (strcmp(srcFileName, stdinmark)) { + if (UTIL_stat(srcFileName, &srcFileStat)) { + /* failure to stat at all is handled during opening */ - /* ensure src is not the same as dict (if present) */ - if (ress.dictFileName != NULL && UTIL_isSameFile(srcFileName, ress.dictFileName)) { - DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); - return 1; + /* ensure src is not a directory */ + if (UTIL_isDirectoryStat(&srcFileStat)) { + DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); + return 1; + } + + /* ensure src is not the same as dict (if present) */ + if (ress.dictFileName != NULL && 
UTIL_isSameFileStat(srcFileName, ress.dictFileName, &srcFileStat, &ress.dictFileStat)) { + DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); + return 1; + } + } } /* Check if "srcFile" is compressed. Only done if --exclude-compressed flag is used @@ -1775,16 +1788,30 @@ FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, return 0; } - ress.srcFile = FIO_openSrcFile(prefs, srcFileName); - if (ress.srcFile == NULL) return 1; /* srcFile could not be opened */ + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); + if (srcFile == NULL) return 1; /* srcFile could not be opened */ + + /* Don't use AsyncIO for small files */ + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + if(fileSize != UTIL_FILESIZE_UNKNOWN && fileSize < ZSTD_BLOCKSIZE_MAX * 3) { + AIO_ReadPool_setAsync(ress.readCtx, 0); + AIO_WritePool_setAsync(ress.writeCtx, 0); + } else { + AIO_ReadPool_setAsync(ress.readCtx, 1); + AIO_WritePool_setAsync(ress.writeCtx, 1); + } - result = FIO_compressFilename_dstFile(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); + AIO_ReadPool_setFile(ress.readCtx, srcFile); + result = FIO_compressFilename_dstFile( + fCtx, prefs, ress, + dstFileName, srcFileName, + &srcFileStat, compressionLevel); + AIO_ReadPool_closeFile(ress.readCtx); - fclose(ress.srcFile); - ress.srcFile = NULL; - if ( prefs->removeSrcFile /* --rm */ - && result == 0 /* success */ - && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ + if ( prefs->removeSrcFile /* --rm */ + && result == 0 /* success */ + && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ ) { /* We must clear the handler, since after this point calling it would * delete both the source and destination files. 
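
The hunk above opens the destination with restrictive temporary permissions and, once the file has been written and closed successfully, transfers the full source stat (mode and timestamps) instead of only the modification time. The CLI hides the portable details behind UTIL_setFileStat; the sketch below shows the underlying pattern for a POSIX system only, so treat transferStatPosix and its simplified error handling as assumptions rather than zstd's actual implementation.

#include <sys/stat.h>
#include <sys/time.h>

/* Copy the source file's permission bits and access/modification times onto
 * an already written destination. POSIX-only illustration. */
static int transferStatPosix(const char* dstPath, const struct stat* srcStat)
{
    struct timeval times[2];
    if (chmod(dstPath, srcStat->st_mode & 07777) != 0) return -1;
    times[0].tv_sec = srcStat->st_atime;  times[0].tv_usec = 0;  /* access time */
    times[1].tv_sec = srcStat->st_mtime;  times[1].tv_usec = 0;  /* modification time */
    return utimes(dstPath, times);
}
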
@@ -1796,16 +1823,18 @@ FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, return result; } -static const char* checked_index(const char* options[], size_t length, size_t index) { +static const char* +checked_index(const char* options[], size_t length, size_t index) { assert(index < length); - // Necessary to avoid warnings since -O3 will omit the above `assert` + /* Necessary to avoid warnings since -O3 will omit the above `assert` */ (void) length; return options[index]; } -#define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (index)) +#define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (size_t)(index)) -void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) { +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) +{ static const char* formatOptions[5] = {ZSTD_EXTENSION, GZ_EXTENSION, XZ_EXTENSION, LZMA_EXTENSION, LZ4_EXTENSION}; static const char* sparseOptions[3] = {" --no-sparse", "", " --sparse"}; @@ -1866,6 +1895,11 @@ FIO_determineCompressedName(const char* srcFileName, const char* outDirName, con char* outDirFilename = NULL; size_t sfnSize = strlen(srcFileName); size_t const srcSuffixLen = strlen(suffix); + + if(!strcmp(srcFileName, stdinmark)) { + return stdoutmark; + } + if (outDirName) { outDirFilename = FIO_createFilename_fromOutDir(srcFileName, outDirName, srcSuffixLen); sfnSize = strlen(outDirFilename); @@ -1928,23 +1962,24 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, /* init */ assert(outFileName != NULL || suffix != NULL); if (outFileName != NULL) { /* output into a single destination (stdout typically) */ - if (FIO_removeMultiFilesWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + FILE *dstFile; + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { FIO_freeCResources(&ress); return 1; } - ress.dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); - if (ress.dstFile == NULL) { /* could not open outFileName */ + dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); + if (dstFile == NULL) { /* could not open outFileName */ error = 1; } else { + AIO_WritePool_setFile(ress.writeCtx, dstFile); for (; fCtx->currFileIdx < fCtx->nbFilesTotal; ++fCtx->currFileIdx) { status = FIO_compressFilename_srcFile(fCtx, prefs, ress, outFileName, inFileNamesTable[fCtx->currFileIdx], compressionLevel); if (!status) fCtx->nbFilesProcessed++; error |= status; } - if (fclose(ress.dstFile)) + if (AIO_WritePool_closeFile(ress.writeCtx)) EXM_THROW(29, "Write error (%s) : cannot properly close %s", strerror(errno), outFileName); - ress.dstFile = NULL; } } else { if (outMirroredRootDirName) @@ -1975,16 +2010,23 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, FIO_checkFilenameCollisions(inFileNamesTable , (unsigned)fCtx->nbFilesTotal); } - if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesInput != 0) { + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesInput); UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesOutput); - DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "%3d files compressed :%.2f%% (%6.*f%4s => %6.*f%4s)\n", - fCtx->nbFilesProcessed, - (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, - hr_isize.precision, hr_isize.value, hr_isize.suffix, - hr_osize.precision, hr_osize.value, hr_osize.suffix); 
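
When several inputs are routed to one destination (-o or -c), each file in the loop above is compressed as its own zstd frame and the frames are appended to the same output; zstd -d later decodes them back to back, which is why the warning stresses that file names and directory structure cannot be recovered. Below is a minimal public-API sketch of that append-frames idea, with hypothetical names (concatCompress, compressFileToStream) and none of the CLI's read/write pools, --rm handling, or progress display.

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Compress one open input into `out` as an independent zstd frame. */
static int compressFileToStream(ZSTD_CCtx* cctx, FILE* in, FILE* out,
                                void* inBuf, size_t inCap, void* outBuf, size_t outCap)
{
    int lastChunk = 0;
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);   /* each input starts a new frame */
    while (!lastChunk) {
        size_t const readSize = fread(inBuf, 1, inCap, in);
        ZSTD_EndDirective const mode = (readSize < inCap) ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { inBuf, readSize, 0 };
        int done = 0;
        lastChunk = (mode == ZSTD_e_end);
        do {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) return 1;
            if (fwrite(outBuf, 1, output.pos, out) != output.pos) return 1;
            done = lastChunk ? (remaining == 0) : (input.pos == input.size);
        } while (!done);
    }
    return 0;
}

/* Append one frame per named input to a single destination stream. */
static int concatCompress(const char* const* names, size_t nbNames, FILE* out)
{
    size_t const inCap = ZSTD_CStreamInSize(), outCap = ZSTD_CStreamOutSize();
    void* const inBuf = malloc(inCap);
    void* const outBuf = malloc(outCap);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    int err = (!inBuf || !outBuf || !cctx);
    size_t n;
    for (n = 0; !err && n < nbNames; n++) {
        FILE* const in = fopen(names[n], "rb");
        if (in == NULL) { err = 1; break; }
        err = compressFileToStream(cctx, in, out, inBuf, inCap, outBuf, outCap);
        fclose(in);
    }
    ZSTD_freeCCtx(cctx); free(inBuf); free(outBuf);
    return err;
}

Decoding such a concatenation reproduces the inputs joined into one stream, which matches the behavior the warning above describes.
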
+ DISPLAY_PROGRESS("\r%79s\r", ""); + if (fCtx->totalBytesInput == 0) { + DISPLAY_SUMMARY("%3d files compressed : (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } else { + DISPLAY_SUMMARY("%3d files compressed : %.2f%% (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } } FIO_freeCResources(&ress); @@ -2001,13 +2043,9 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, * Decompression ***************************************************************************/ typedef struct { - void* srcBuffer; - size_t srcBufferSize; - size_t srcBufferLoaded; - void* dstBuffer; - size_t dstBufferSize; ZSTD_DStream* dctx; - FILE* dstFile; + WritePoolCtx_t *writeCtx; + ReadPoolCtx_t *readCtx; } dRess_t; static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFileName) @@ -2025,159 +2063,49 @@ static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFi CHECK( ZSTD_DCtx_setMaxWindowSize(ress.dctx, prefs->memLimit) ); CHECK( ZSTD_DCtx_setParameter(ress.dctx, ZSTD_d_forceIgnoreChecksum, !prefs->checksumFlag)); - ress.srcBufferSize = ZSTD_DStreamInSize(); - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = ZSTD_DStreamOutSize(); - ress.dstBuffer = malloc(ress.dstBufferSize); - if (!ress.srcBuffer || !ress.dstBuffer) - EXM_THROW(61, "Allocation error : not enough memory"); - /* dictionary */ { void* dictBuffer; - size_t const dictBufferSize = FIO_createDictBuffer(&dictBuffer, dictFileName, prefs); - CHECK( ZSTD_initDStream_usingDict(ress.dctx, dictBuffer, dictBufferSize) ); + stat_t statbuf; + size_t const dictBufferSize = FIO_createDictBuffer(&dictBuffer, dictFileName, prefs, &statbuf); + CHECK( ZSTD_DCtx_reset(ress.dctx, ZSTD_reset_session_only) ); + CHECK( ZSTD_DCtx_loadDictionary(ress.dctx, dictBuffer, dictBufferSize) ); free(dictBuffer); } + ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_DStreamOutSize()); + ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_DStreamInSize()); + return ress; } static void FIO_freeDResources(dRess_t ress) { CHECK( ZSTD_freeDStream(ress.dctx) ); - free(ress.srcBuffer); - free(ress.dstBuffer); + AIO_WritePool_free(ress.writeCtx); + AIO_ReadPool_free(ress.readCtx); } - -/** FIO_fwriteSparse() : -* @return : storedSkips, -* argument for next call to FIO_fwriteSparse() or FIO_fwriteSparseEnd() */ -static unsigned -FIO_fwriteSparse(FILE* file, - const void* buffer, size_t bufferSize, - const FIO_prefs_t* const prefs, - unsigned storedSkips) -{ - const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ - size_t bufferSizeT = bufferSize / sizeof(size_t); - const size_t* const bufferTEnd = bufferT + bufferSizeT; - const size_t* ptrT = bufferT; - static const size_t segmentSizeT = (32 KB) / sizeof(size_t); /* check every 32 KB */ - - if (prefs->testMode) return 0; /* do not output anything in test mode */ - - if (!prefs->sparseFileSupport) { /* normal write */ - size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); - if (sizeCheck != bufferSize) - EXM_THROW(70, "Write error : cannot write decoded block : %s", - strerror(errno)); - return 0; - } - - /* avoid int overflow */ - if (storedSkips > 1 GB) { - if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0) - 
EXM_THROW(91, "1 GB skip error (sparse file support)"); - storedSkips -= 1 GB; - } - - while (ptrT < bufferTEnd) { - size_t nb0T; - - /* adjust last segment if < 32 KB */ - size_t seg0SizeT = segmentSizeT; - if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; - bufferSizeT -= seg0SizeT; - - /* count leading zeroes */ - for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; - storedSkips += (unsigned)(nb0T * sizeof(size_t)); - - if (nb0T != seg0SizeT) { /* not all 0s */ - size_t const nbNon0ST = seg0SizeT - nb0T; - /* skip leading zeros */ - if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) - EXM_THROW(92, "Sparse skip error ; try --no-sparse"); - storedSkips = 0; - /* write the rest */ - if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST) - EXM_THROW(93, "Write error : cannot write decoded block : %s", - strerror(errno)); - } - ptrT += seg0SizeT; - } - - { static size_t const maskT = sizeof(size_t)-1; - if (bufferSize & maskT) { - /* size not multiple of sizeof(size_t) : implies end of block */ - const char* const restStart = (const char*)bufferTEnd; - const char* restPtr = restStart; - const char* const restEnd = (const char*)buffer + bufferSize; - assert(restEnd > restStart && restEnd < restStart + sizeof(size_t)); - for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; - storedSkips += (unsigned) (restPtr - restStart); - if (restPtr != restEnd) { - /* not all remaining bytes are 0 */ - size_t const restSize = (size_t)(restEnd - restPtr); - if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) - EXM_THROW(92, "Sparse skip error ; try --no-sparse"); - if (fwrite(restPtr, 1, restSize, file) != restSize) - EXM_THROW(95, "Write error : cannot write end of decoded block : %s", - strerror(errno)); - storedSkips = 0; - } } } - - return storedSkips; -} - -static void -FIO_fwriteSparseEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) -{ - if (prefs->testMode) assert(storedSkips == 0); - if (storedSkips>0) { - assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ - (void)prefs; /* assert can be disabled, in which case prefs becomes unused */ - if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) - EXM_THROW(69, "Final skip error (sparse file support)"); - /* last zero must be explicitly written, - * so that skipped ones get implicitly translated as zero by FS */ - { const char lastZeroByte[1] = { 0 }; - if (fwrite(lastZeroByte, 1, 1, file) != 1) - EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); - } } -} - - /** FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode @return : 0 (no error) */ -static int FIO_passThrough(const FIO_prefs_t* const prefs, - FILE* foutput, FILE* finput, - void* buffer, size_t bufferSize, - size_t alreadyLoaded) -{ - size_t const blockSize = MIN(64 KB, bufferSize); - size_t readFromInput; - unsigned storedSkips = 0; - - /* assumption : ress->srcBufferLoaded bytes already loaded and stored within buffer */ - { size_t const sizeCheck = fwrite(buffer, 1, alreadyLoaded, foutput); - if (sizeCheck != alreadyLoaded) { - DISPLAYLEVEL(1, "Pass-through write error : %s\n", strerror(errno)); - return 1; - } } - - do { - readFromInput = fread(buffer, 1, blockSize, finput); - storedSkips = FIO_fwriteSparse(foutput, buffer, readFromInput, prefs, storedSkips); - } while (readFromInput == blockSize); - if (ferror(finput)) { - DISPLAYLEVEL(1, "Pass-through read error : %s\n", strerror(errno)); - return 1; - } - assert(feof(finput)); - - 
FIO_fwriteSparseEnd(prefs, foutput, storedSkips); +static int FIO_passThrough(dRess_t *ress) +{ + size_t const blockSize = MIN(MIN(64 KB, ZSTD_DStreamInSize()), ZSTD_DStreamOutSize()); + IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + + while(ress->readCtx->srcBufferLoaded) { + size_t writeSize; + writeSize = MIN(blockSize, ress->readCtx->srcBufferLoaded); + assert(writeSize <= writeJob->bufferSize); + memcpy(writeJob->buffer, ress->readCtx->srcBuffer, writeSize); + writeJob->usedBufferSize = writeSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + AIO_ReadPool_consumeBytes(ress->readCtx, writeSize); + AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + } + assert(ress->readCtx->reachedEof); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return 0; } @@ -2195,7 +2123,7 @@ FIO_zstdErrorHelp(const FIO_prefs_t* const prefs, return; /* Try to decode the frame header */ - err = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded); + err = ZSTD_getFrameHeader(&header, ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded); if (err == 0) { unsigned long long const windowSize = header.windowSize; unsigned const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0); @@ -2218,13 +2146,13 @@ FIO_zstdErrorHelp(const FIO_prefs_t* const prefs, */ #define FIO_ERROR_FRAME_DECODING ((unsigned long long)(-2)) static unsigned long long -FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, FILE* finput, +FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, const FIO_prefs_t* const prefs, const char* srcFileName, U64 alreadyDecoded) /* for multi-frames streams */ { U64 frameSize = 0; - U32 storedSkips = 0; + IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx); /* display last 20 characters only */ { size_t const srcFileLength = strlen(srcFileName); @@ -2234,67 +2162,60 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, FILE* finput, ZSTD_DCtx_reset(ress->dctx, ZSTD_reset_session_only); /* Header loading : ensures ZSTD_getFrameHeader() will succeed */ - { size_t const toDecode = ZSTD_FRAMEHEADERSIZE_MAX; - if (ress->srcBufferLoaded < toDecode) { - size_t const toRead = toDecode - ress->srcBufferLoaded; - void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded; - ress->srcBufferLoaded += fread(startPosition, 1, toRead, finput); - } } + AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_FRAMEHEADERSIZE_MAX); /* Main decompression Loop */ while (1) { - ZSTD_inBuffer inBuff = { ress->srcBuffer, ress->srcBufferLoaded, 0 }; - ZSTD_outBuffer outBuff= { ress->dstBuffer, ress->dstBufferSize, 0 }; + ZSTD_inBuffer inBuff = setInBuffer( ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded, 0 ); + ZSTD_outBuffer outBuff= setOutBuffer( writeJob->buffer, writeJob->bufferSize, 0 ); size_t const readSizeHint = ZSTD_decompressStream(ress->dctx, &outBuff, &inBuff); - const int displayLevel = (g_display_prefs.progressSetting == FIO_ps_always) ? 
1 : 2; UTIL_HumanReadableSize_t const hrs = UTIL_makeHumanReadableSize(alreadyDecoded+frameSize); if (ZSTD_isError(readSizeHint)) { DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n", srcFileName, ZSTD_getErrorName(readSizeHint)); FIO_zstdErrorHelp(prefs, ress, readSizeHint, srcFileName); + AIO_WritePool_releaseIoJob(writeJob); return FIO_ERROR_FRAME_DECODING; } /* Write block */ - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, prefs, storedSkips); + writeJob->usedBufferSize = outBuff.pos; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); frameSize += outBuff.pos; if (fCtx->nbFilesTotal > 1) { size_t srcFileNameSize = strlen(srcFileName); if (srcFileNameSize > 18) { const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... ", - fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix); + DISPLAYUPDATE_PROGRESS( + "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix); } else { - DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", + DISPLAYUPDATE_PROGRESS("\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFileName, hrs.precision, hrs.value, hrs.suffix); } } else { - DISPLAYUPDATE(displayLevel, "\r%-20.20s : %.*f%s... ", + DISPLAYUPDATE_PROGRESS("\r%-20.20s : %.*f%s... ", srcFileName, hrs.precision, hrs.value, hrs.suffix); } - if (inBuff.pos > 0) { - memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos); - ress->srcBufferLoaded -= inBuff.pos; - } + AIO_ReadPool_consumeBytes(ress->readCtx, inBuff.pos); if (readSizeHint == 0) break; /* end of frame */ /* Fill input buffer */ - { size_t const toDecode = MIN(readSizeHint, ress->srcBufferSize); /* support large skippable frames */ - if (ress->srcBufferLoaded < toDecode) { - size_t const toRead = toDecode - ress->srcBufferLoaded; /* > 0 */ - void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded; - size_t const readSize = fread(startPosition, 1, toRead, finput); + { size_t const toDecode = MIN(readSizeHint, ZSTD_DStreamInSize()); /* support large skippable frames */ + if (ress->readCtx->srcBufferLoaded < toDecode) { + size_t const readSize = AIO_ReadPool_fillBuffer(ress->readCtx, toDecode); if (readSize==0) { DISPLAYLEVEL(1, "%s : Read error (39) : premature end \n", - srcFileName); + srcFileName); + AIO_WritePool_releaseIoJob(writeJob); return FIO_ERROR_FRAME_DECODING; } - ress->srcBufferLoaded += readSize; - } } } + } } } - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return frameSize; } @@ -2302,37 +2223,36 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, FILE* finput, #ifdef ZSTD_GZDECOMPRESS static unsigned long long -FIO_decompressGzFrame(dRess_t* ress, FILE* srcFile, - const FIO_prefs_t* const prefs, - const char* srcFileName) +FIO_decompressGzFrame(dRess_t* ress, const char* srcFileName) { unsigned long long outFileSize = 0; z_stream strm; int flush = Z_NO_FLUSH; int decodingError = 0; - unsigned storedSkips = 0; + IOJob_t *writeJob = NULL; strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = 0; strm.avail_in = 0; - /* see http://www.zlib.net/manual.html */ + /* see 
https://www.zlib.net/manual.html */ if (inflateInit2(&strm, 15 /* maxWindowLogSize */ + 16 /* gzip only */) != Z_OK) return FIO_ERROR_FRAME_DECODING; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - strm.avail_in = (uInt)ress->srcBufferLoaded; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; for ( ; ; ) { int ret; if (strm.avail_in == 0) { - ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile); - if (ress->srcBufferLoaded == 0) flush = Z_FINISH; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; - strm.avail_in = (uInt)ress->srcBufferLoaded; + AIO_ReadPool_consumeAndRefill(ress->readCtx); + if (ress->readCtx->srcBufferLoaded == 0) flush = Z_FINISH; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; } ret = inflate(&strm, flush); if (ret == Z_BUF_ERROR) { @@ -2343,35 +2263,34 @@ FIO_decompressGzFrame(dRess_t* ress, FILE* srcFile, DISPLAYLEVEL(1, "zstd: %s: inflate error %d \n", srcFileName, ret); decodingError = 1; break; } - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; + { size_t const decompBytes = writeJob->bufferSize - strm.avail_out; if (decompBytes) { - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decompBytes, prefs, storedSkips); + writeJob->usedBufferSize = decompBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; } } if (ret == Z_STREAM_END) break; } - if (strm.avail_in > 0) - memmove(ress->srcBuffer, strm.next_in, strm.avail_in); - ress->srcBufferLoaded = strm.avail_in; + AIO_ReadPool_consumeBytes(ress->readCtx, ress->readCtx->srcBufferLoaded - strm.avail_in); + if ( (inflateEnd(&strm) != Z_OK) /* release resources ; error detected */ && (decodingError==0) ) { DISPLAYLEVEL(1, "zstd: %s: inflateEnd error \n", srcFileName); decodingError = 1; } - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? 
FIO_ERROR_FRAME_DECODING : outFileSize; } #endif - #ifdef ZSTD_LZMADECOMPRESS static unsigned long long -FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, - const FIO_prefs_t* const prefs, +FIO_decompressLzmaFrame(dRess_t* ress, const char* srcFileName, int plain_lzma) { unsigned long long outFileSize = 0; @@ -2379,7 +2298,7 @@ FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, lzma_action action = LZMA_RUN; lzma_ret initRet; int decodingError = 0; - unsigned storedSkips = 0; + IOJob_t *writeJob = NULL; strm.next_in = 0; strm.avail_in = 0; @@ -2396,18 +2315,19 @@ FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, return FIO_ERROR_FRAME_DECODING; } - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = ress->srcBufferLoaded; + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; for ( ; ; ) { lzma_ret ret; if (strm.avail_in == 0) { - ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile); - if (ress->srcBufferLoaded == 0) action = LZMA_FINISH; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = ress->srcBufferLoaded; + AIO_ReadPool_consumeAndRefill(ress->readCtx); + if (ress->readCtx->srcBufferLoaded == 0) action = LZMA_FINISH; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; } ret = lzma_code(&strm, action); @@ -2420,104 +2340,90 @@ FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, srcFileName, ret); decodingError = 1; break; } - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; + { size_t const decompBytes = writeJob->bufferSize - strm.avail_out; if (decompBytes) { - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decompBytes, prefs, storedSkips); + writeJob->usedBufferSize = decompBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; } } if (ret == LZMA_STREAM_END) break; } - if (strm.avail_in > 0) - memmove(ress->srcBuffer, strm.next_in, strm.avail_in); - ress->srcBufferLoaded = strm.avail_in; + AIO_ReadPool_consumeBytes(ress->readCtx, ress->readCtx->srcBufferLoaded - strm.avail_in); lzma_end(&strm); - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? 
FIO_ERROR_FRAME_DECODING : outFileSize; } #endif #ifdef ZSTD_LZ4DECOMPRESS static unsigned long long -FIO_decompressLz4Frame(dRess_t* ress, FILE* srcFile, - const FIO_prefs_t* const prefs, - const char* srcFileName) +FIO_decompressLz4Frame(dRess_t* ress, const char* srcFileName) { unsigned long long filesize = 0; - LZ4F_errorCode_t nextToLoad; + LZ4F_errorCode_t nextToLoad = 4; LZ4F_decompressionContext_t dCtx; LZ4F_errorCode_t const errorCode = LZ4F_createDecompressionContext(&dCtx, LZ4F_VERSION); int decodingError = 0; - unsigned storedSkips = 0; + IOJob_t *writeJob = NULL; if (LZ4F_isError(errorCode)) { DISPLAYLEVEL(1, "zstd: failed to create lz4 decompression context \n"); return FIO_ERROR_FRAME_DECODING; } - /* Init feed with magic number (already consumed from FILE* sFile) */ - { size_t inSize = 4; - size_t outSize= 0; - MEM_writeLE32(ress->srcBuffer, LZ4_MAGICNUMBER); - nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &outSize, ress->srcBuffer, &inSize, NULL); - if (LZ4F_isError(nextToLoad)) { - DISPLAYLEVEL(1, "zstd: %s: lz4 header error : %s \n", - srcFileName, LZ4F_getErrorName(nextToLoad)); - LZ4F_freeDecompressionContext(dCtx); - return FIO_ERROR_FRAME_DECODING; - } } + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); /* Main Loop */ for (;nextToLoad;) { - size_t readSize; size_t pos = 0; - size_t decodedBytes = ress->dstBufferSize; + size_t decodedBytes = writeJob->bufferSize; + int fullBufferDecoded = 0; /* Read input */ - if (nextToLoad > ress->srcBufferSize) nextToLoad = ress->srcBufferSize; - readSize = fread(ress->srcBuffer, 1, nextToLoad, srcFile); - if (!readSize) break; /* reached end of file or stream */ + AIO_ReadPool_fillBuffer(ress->readCtx, nextToLoad); + if(!ress->readCtx->srcBufferLoaded) break; /* reached end of file */ - while ((pos < readSize) || (decodedBytes == ress->dstBufferSize)) { /* still to read, or still to flush */ + while ((pos < ress->readCtx->srcBufferLoaded) || fullBufferDecoded) { /* still to read, or still to flush */ /* Decode Input (at least partially) */ - size_t remaining = readSize - pos; - decodedBytes = ress->dstBufferSize; - nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &decodedBytes, (char*)(ress->srcBuffer)+pos, &remaining, NULL); + size_t remaining = ress->readCtx->srcBufferLoaded - pos; + decodedBytes = writeJob->bufferSize; + nextToLoad = LZ4F_decompress(dCtx, writeJob->buffer, &decodedBytes, (char*)(ress->readCtx->srcBuffer)+pos, + &remaining, NULL); if (LZ4F_isError(nextToLoad)) { DISPLAYLEVEL(1, "zstd: %s: lz4 decompression error : %s \n", srcFileName, LZ4F_getErrorName(nextToLoad)); decodingError = 1; nextToLoad = 0; break; } pos += remaining; + assert(pos <= ress->readCtx->srcBufferLoaded); + fullBufferDecoded = decodedBytes == writeJob->bufferSize; /* Write Block */ if (decodedBytes) { UTIL_HumanReadableSize_t hrs; - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decodedBytes, prefs, storedSkips); + writeJob->usedBufferSize = decodedBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); filesize += decodedBytes; hrs = UTIL_makeHumanReadableSize(filesize); - DISPLAYUPDATE(2, "\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); + DISPLAYUPDATE_PROGRESS("\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); } if (!nextToLoad) break; } + AIO_ReadPool_consumeBytes(ress->readCtx, pos); } - /* can be out because readSize == 0, which could be an fread() error */ - if (ferror(srcFile)) { - DISPLAYLEVEL(1, "zstd: %s: read error \n", srcFileName); - decodingError=1; - } - 
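/* Illustrative sketch (not part of the patch): the read-pool contract shared by the
 * zstd, gzip, lzma and lz4 paths in this change: fill, inspect srcBuffer and
 * srcBufferLoaded, consume what was used, repeat until nothing more gets loaded.
 * The helper name drainInput and the 64 KB block size are assumptions. */
static unsigned long long drainInput(ReadPoolCtx_t* readCtx)
{
    unsigned long long total = 0;
    size_t const blockSize = 64 * 1024;
    AIO_ReadPool_fillBuffer(readCtx, blockSize);          /* 0 bytes loaded means end of input */
    while (readCtx->srcBufferLoaded) {
        size_t const chunk = readCtx->srcBufferLoaded;
        /* ... decode the bytes in readCtx->srcBuffer[0..chunk) here ... */
        total += chunk;
        AIO_ReadPool_consumeBytes(readCtx, chunk);        /* advance past the consumed bytes */
        AIO_ReadPool_fillBuffer(readCtx, blockSize);      /* pick up the next completed read */
    }
    return total;
}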
if (nextToLoad!=0) { DISPLAYLEVEL(1, "zstd: %s: unfinished lz4 stream \n", srcFileName); decodingError=1; } LZ4F_freeDecompressionContext(dCtx); - ress->srcBufferLoaded = 0; /* LZ4F will reach exact frame boundary */ - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : filesize; } @@ -2532,23 +2438,30 @@ FIO_decompressLz4Frame(dRess_t* ress, FILE* srcFile, * 1 : error */ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, - dRess_t ress, FILE* srcFile, - const FIO_prefs_t* const prefs, - const char* dstFileName, const char* srcFileName) + dRess_t ress, const FIO_prefs_t* const prefs, + const char* dstFileName, const char* srcFileName) { unsigned readSomething = 0; unsigned long long filesize = 0; - assert(srcFile != NULL); + int passThrough = prefs->passThrough; + + if (passThrough == -1) { + /* If pass-through mode is not explicitly enabled or disabled, + * default to the legacy behavior of enabling it if we are writing + * to stdout with the overwrite flag enabled. + */ + passThrough = prefs->overwrite && !strcmp(dstFileName, stdoutmark); + } + assert(passThrough == 0 || passThrough == 1); /* for each frame */ for ( ; ; ) { /* check magic number -> version */ size_t const toRead = 4; - const BYTE* const buf = (const BYTE*)ress.srcBuffer; - if (ress.srcBufferLoaded < toRead) /* load up to 4 bytes for header */ - ress.srcBufferLoaded += fread((char*)ress.srcBuffer + ress.srcBufferLoaded, - (size_t)1, toRead - ress.srcBufferLoaded, srcFile); - if (ress.srcBufferLoaded==0) { + const BYTE* buf; + AIO_ReadPool_fillBuffer(ress.readCtx, toRead); + buf = (const BYTE*)ress.readCtx->srcBuffer; + if (ress.readCtx->srcBufferLoaded==0) { if (readSomething==0) { /* srcFile is empty (which is invalid) */ DISPLAYLEVEL(1, "zstd: %s: unexpected end of file \n", srcFileName); return 1; @@ -2556,17 +2469,20 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, break; /* no more input */ } readSomething = 1; /* there is at least 1 byte in srcFile */ - if (ress.srcBufferLoaded < toRead) { + if (ress.readCtx->srcBufferLoaded < toRead) { /* not enough input to check magic number */ + if (passThrough) { + return FIO_passThrough(&ress); + } DISPLAYLEVEL(1, "zstd: %s: unknown header \n", srcFileName); return 1; } - if (ZSTD_isFrame(buf, ress.srcBufferLoaded)) { - unsigned long long const frameSize = FIO_decompressZstdFrame(fCtx, &ress, srcFile, prefs, srcFileName, filesize); + if (ZSTD_isFrame(buf, ress.readCtx->srcBufferLoaded)) { + unsigned long long const frameSize = FIO_decompressZstdFrame(fCtx, &ress, prefs, srcFileName, filesize); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; } else if (buf[0] == 31 && buf[1] == 139) { /* gz magic number */ #ifdef ZSTD_GZDECOMPRESS - unsigned long long const frameSize = FIO_decompressGzFrame(&ress, srcFile, prefs, srcFileName); + unsigned long long const frameSize = FIO_decompressGzFrame(&ress, srcFileName); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else @@ -2576,7 +2492,7 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, } else if ((buf[0] == 0xFD && buf[1] == 0x37) /* xz magic number */ || (buf[0] == 0x5D && buf[1] == 0x00)) { /* lzma header (no magic number) */ #ifdef ZSTD_LZMADECOMPRESS - unsigned long long const frameSize = FIO_decompressLzmaFrame(&ress, srcFile, prefs, srcFileName, buf[0] != 0xFD); + unsigned long long const frameSize = 
FIO_decompressLzmaFrame(&ress, srcFileName, buf[0] != 0xFD); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else @@ -2585,18 +2501,15 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, #endif } else if (MEM_readLE32(buf) == LZ4_MAGICNUMBER) { #ifdef ZSTD_LZ4DECOMPRESS - unsigned long long const frameSize = FIO_decompressLz4Frame(&ress, srcFile, prefs, srcFileName); + unsigned long long const frameSize = FIO_decompressLz4Frame(&ress, srcFileName); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else DISPLAYLEVEL(1, "zstd: %s: lz4 file cannot be uncompressed (zstd compiled without HAVE_LZ4) -- ignored \n", srcFileName); return 1; #endif - } else if ((prefs->overwrite) && !strcmp (dstFileName, stdoutmark)) { /* pass-through mode */ - return FIO_passThrough(prefs, - ress.dstFile, srcFile, - ress.srcBuffer, ress.srcBufferSize, - ress.srcBufferLoaded); + } else if (passThrough) { + return FIO_passThrough(&ress); } else { DISPLAYLEVEL(1, "zstd: %s: unsupported format \n", srcFileName); return 1; @@ -2604,48 +2517,45 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, /* Final Status */ fCtx->totalBytesOutput += (size_t)filesize; - DISPLAYLEVEL(2, "\r%79s\r", ""); - /* No status message in pipe mode (stdin - stdout) or multi-files mode */ - if ((g_display_prefs.displayLevel >= 2 && fCtx->nbFilesTotal <= 1) || - g_display_prefs.displayLevel >= 3 || - g_display_prefs.progressSetting == FIO_ps_always) { - DISPLAYLEVEL(1, "\r%-20s: %llu bytes \n", srcFileName, filesize); - } + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) + DISPLAY_SUMMARY("%-20s: %llu bytes \n", srcFileName, filesize); return 0; } /** FIO_decompressDstFile() : - open `dstFileName`, - or path-through if ress.dstFile is already != 0, + open `dstFileName`, or pass-through if writeCtx's file is already != 0, then start decompression process (FIO_decompressFrames()). @return : 0 : OK 1 : operation aborted */ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, - dRess_t ress, FILE* srcFile, - const char* dstFileName, const char* srcFileName) + dRess_t ress, + const char* dstFileName, + const char* srcFileName, + const stat_t* srcFileStat) { int result; - stat_t statbuf; int releaseDstFile = 0; - int transferMTime = 0; + int transferStat = 0; - if ((ress.dstFile == NULL) && (prefs->testMode==0)) { + if ((AIO_WritePool_getFile(ress.writeCtx) == NULL) && (prefs->testMode == 0)) { + FILE *dstFile; int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; if ( strcmp(srcFileName, stdinmark) /* special case : don't transfer permissions from stdin */ && strcmp(dstFileName, stdoutmark) - && UTIL_stat(srcFileName, &statbuf) - && UTIL_isRegularFileStat(&statbuf) ) { - dstFilePermissions = statbuf.st_mode; - transferMTime = 1; + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFilePermissions = TEMPORARY_FILE_PERMISSIONS; } releaseDstFile = 1; - ress.dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); - if (ress.dstFile==NULL) return 1; + dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); + if (dstFile==NULL) return 1; + AIO_WritePool_setFile(ress.writeCtx, dstFile); /* Must only be added after FIO_openDstFile() succeeds. 
* Otherwise we may delete the destination file if it already exists, @@ -2654,19 +2564,17 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, addHandler(dstFileName); } - result = FIO_decompressFrames(fCtx, ress, srcFile, prefs, dstFileName, srcFileName); + result = FIO_decompressFrames(fCtx, ress, prefs, dstFileName, srcFileName); if (releaseDstFile) { - FILE* const dstFile = ress.dstFile; clearHandler(); - ress.dstFile = NULL; - if (fclose(dstFile)) { + if (AIO_WritePool_closeFile(ress.writeCtx)) { DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result = 1; } - if (transferMTime) { - UTIL_utime(dstFileName, &statbuf); + if (transferStat) { + UTIL_setFileStat(dstFileName, srcFileStat); } if ( (result != 0) /* operation failure */ @@ -2688,18 +2596,34 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, static int FIO_decompressSrcFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, dRess_t ress, const char* dstFileName, const char* srcFileName) { FILE* srcFile; + stat_t srcFileStat; int result; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; if (UTIL_isDirectory(srcFileName)) { DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); return 1; } - srcFile = FIO_openSrcFile(prefs, srcFileName); + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); if (srcFile==NULL) return 1; - ress.srcBufferLoaded = 0; - result = FIO_decompressDstFile(fCtx, prefs, ress, srcFile, dstFileName, srcFileName); + /* Don't use AsyncIO for small files */ + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + if(fileSize != UTIL_FILESIZE_UNKNOWN && fileSize < ZSTD_BLOCKSIZE_MAX * 3) { + AIO_ReadPool_setAsync(ress.readCtx, 0); + AIO_WritePool_setAsync(ress.writeCtx, 0); + } else { + AIO_ReadPool_setAsync(ress.readCtx, 1); + AIO_WritePool_setAsync(ress.writeCtx, 1); + } + + AIO_ReadPool_setFile(ress.readCtx, srcFile); + + result = FIO_decompressDstFile(fCtx, prefs, ress, dstFileName, srcFileName, &srcFileStat); + + AIO_ReadPool_setFile(ress.readCtx, NULL); /* Close file */ if (fclose(srcFile)) { @@ -2788,6 +2712,11 @@ FIO_determineDstName(const char* srcFileName, const char* outDirName) size_t srcSuffixLen; const char* const srcSuffix = strrchr(srcFileName, '.'); + + if(!strcmp(srcFileName, stdinmark)) { + return stdoutmark; + } + if (srcSuffix == NULL) { DISPLAYLEVEL(1, "zstd: %s: unknown suffix (%s expected). 
" @@ -2869,20 +2798,21 @@ FIO_decompressMultipleFilenames(FIO_ctx_t* const fCtx, dRess_t ress = FIO_createDResources(prefs, dictFileName); if (outFileName) { - if (FIO_removeMultiFilesWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { FIO_freeDResources(ress); return 1; } if (!prefs->testMode) { - ress.dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); - if (ress.dstFile == 0) EXM_THROW(19, "cannot open %s", outFileName); + FILE* dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); + if (dstFile == 0) EXM_THROW(19, "cannot open %s", outFileName); + AIO_WritePool_setFile(ress.writeCtx, dstFile); } for (; fCtx->currFileIdx < fCtx->nbFilesTotal; fCtx->currFileIdx++) { status = FIO_decompressSrcFile(fCtx, prefs, ress, outFileName, srcNamesTable[fCtx->currFileIdx]); if (!status) fCtx->nbFilesProcessed++; error |= status; } - if ((!prefs->testMode) && (fclose(ress.dstFile))) + if ((!prefs->testMode) && (AIO_WritePool_closeFile(ress.writeCtx))) EXM_THROW(72, "Write error : %s : cannot properly close output file", strerror(errno)); } else { @@ -2912,8 +2842,11 @@ FIO_decompressMultipleFilenames(FIO_ctx_t* const fCtx, FIO_checkFilenameCollisions(srcNamesTable , (unsigned)fCtx->nbFilesTotal); } - if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesOutput != 0) - DISPLAYLEVEL(2, "%d files decompressed : %6zu bytes total \n", fCtx->nbFilesProcessed, fCtx->totalBytesOutput); + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { + DISPLAY_PROGRESS("\r%79s\r", ""); + DISPLAY_SUMMARY("%d files decompressed : %6llu bytes total \n", + fCtx->nbFilesProcessed, (unsigned long long)fCtx->totalBytesOutput); + } FIO_freeDResources(ress); return error; @@ -2931,7 +2864,9 @@ typedef struct { int numSkippableFrames; int decompUnavailable; int usesCheck; + BYTE checksum[4]; U32 nbFiles; + unsigned dictID; } fileInfo_t; typedef enum { @@ -2939,7 +2874,7 @@ typedef enum { info_frame_error=1, info_not_zstd=2, info_file_error=3, - info_truncated_input=4, + info_truncated_input=4 } InfoError; #define ERROR_IF(c,n,...) { \ @@ -2986,6 +2921,12 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) } ERROR_IF(ZSTD_getFrameHeader(&header, headerBuffer, numBytesRead) != 0, info_frame_error, "Error: could not decode frame header"); + if (info->dictID != 0 && info->dictID != header.dictID) { + DISPLAY("WARNING: File contains multiple frames with different dictionary IDs. 
Showing dictID 0 instead"); + info->dictID = 0; + } else { + info->dictID = header.dictID; + } info->windowSize = header.windowSize; /* move to the end of the frame header */ { size_t const headerSize = ZSTD_frameHeaderSize(headerBuffer, numBytesRead); @@ -3018,8 +2959,8 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) int const contentChecksumFlag = (frameHeaderDescriptor & (1 << 2)) >> 2; if (contentChecksumFlag) { info->usesCheck = 1; - ERROR_IF(fseek(srcFile, 4, SEEK_CUR) != 0, - info_frame_error, "Error: could not skip past checksum"); + ERROR_IF(fread(info->checksum, 1, 4, srcFile) != 4, + info_frame_error, "Error: could not read checksum"); } } info->numActualFrames++; } @@ -3045,10 +2986,11 @@ static InfoError getFileInfo_fileConfirmed(fileInfo_t* info, const char* inFileName) { InfoError status; - FILE* const srcFile = FIO_openSrcFile(NULL, inFileName); + stat_t srcFileStat; + FILE* const srcFile = FIO_openSrcFile(NULL, inFileName, &srcFileStat); ERROR_IF(srcFile == NULL, info_file_error, "Error: could not open source file %s", inFileName); - info->compressedSize = UTIL_getFileSize(inFileName); + info->compressedSize = UTIL_getFileSizeStat(&srcFileStat); status = FIO_analyzeFrames(info, srcFile); fclose(srcFile); @@ -3098,6 +3040,7 @@ displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) DISPLAYOUT("# Zstandard Frames: %d\n", info->numActualFrames); if (info->numSkippableFrames) DISPLAYOUT("# Skippable Frames: %d\n", info->numSkippableFrames); + DISPLAYOUT("DictID: %u\n", info->dictID); DISPLAYOUT("Window Size: %.*f%s (%llu B)\n", window_hrs.precision, window_hrs.value, window_hrs.suffix, (unsigned long long)info->windowSize); @@ -3110,7 +3053,16 @@ displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) (unsigned long long)info->decompressedSize); DISPLAYOUT("Ratio: %.4f\n", ratio); } - DISPLAYOUT("Check: %s\n", checkString); + + if (info->usesCheck && info->numActualFrames == 1) { + DISPLAYOUT("Check: %s %02x%02x%02x%02x\n", checkString, + info->checksum[3], info->checksum[2], + info->checksum[1], info->checksum[0] + ); + } else { + DISPLAYOUT("Check: %s\n", checkString); + } + DISPLAYOUT("\n"); } } @@ -3174,7 +3126,7 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis } } if (numFiles == 0) { - if (!IS_CONSOLE(stdin)) { + if (!UTIL_isConsole(stdin)) { DISPLAYLEVEL(1, "zstd: --list does not support reading from standard input \n"); } DISPLAYLEVEL(1, "No files given \n"); diff --git a/programs/fileio.h b/programs/fileio.h index 61094db83cb..291d4d41458 100644 --- a/programs/fileio.h +++ b/programs/fileio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -12,6 +12,8 @@ #ifndef FILEIO_H_23981798732 #define FILEIO_H_23981798732 +#include "fileio_types.h" +#include "util.h" /* FileNamesTable */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ #include "../lib/zstd.h" /* ZSTD_* */ @@ -53,10 +55,6 @@ extern "C" { /*-************************************* * Types ***************************************/ -typedef enum { FIO_zstdCompression, FIO_gzipCompression, FIO_xzCompression, FIO_lzmaCompression, FIO_lz4Compression } FIO_compressionType_t; - -typedef struct FIO_prefs_s FIO_prefs_t; - FIO_prefs_t* FIO_createPreferences(void); void FIO_freePreferences(FIO_prefs_t* const prefs); @@ -66,9 +64,6 @@ typedef struct FIO_ctx_s FIO_ctx_t; FIO_ctx_t* FIO_createContext(void); void FIO_freeContext(FIO_ctx_t* const fCtx); -typedef struct FIO_display_prefs_s FIO_display_prefs_t; - -typedef enum { FIO_ps_auto, FIO_ps_never, FIO_ps_always } FIO_progressSetting_e; /*-************************************* * Parameters @@ -76,7 +71,7 @@ typedef enum { FIO_ps_auto, FIO_ps_never, FIO_ps_always } FIO_progressSetting_e; /* FIO_prefs_t functions */ void FIO_setCompressionType(FIO_prefs_t* const prefs, FIO_compressionType_t compressionType); void FIO_overwriteMode(FIO_prefs_t* const prefs); -void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, unsigned adapt); +void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt); void FIO_setAdaptMin(FIO_prefs_t* const prefs, int minCLevel); void FIO_setAdaptMax(FIO_prefs_t* const prefs, int maxCLevel); void FIO_setUseRowMatchFinder(FIO_prefs_t* const prefs, int useRowMatchFinder); @@ -91,8 +86,8 @@ void FIO_setLdmMinMatch(FIO_prefs_t* const prefs, int ldmMinMatch); void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit); void FIO_setNbWorkers(FIO_prefs_t* const prefs, int nbWorkers); void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog); -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag); -void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag); +void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable); void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize); void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize); @@ -109,6 +104,8 @@ void FIO_setAllowBlockDevices(FIO_prefs_t* const prefs, int allowBlockDevices); void FIO_setPatchFromMode(FIO_prefs_t* const prefs, int value); void FIO_setContentSize(FIO_prefs_t* const prefs, int value); void FIO_displayCompressionParameters(const FIO_prefs_t* prefs); +void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value); +void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value); /* FIO_ctx_t functions */ void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value); @@ -171,6 +168,9 @@ int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles); /* custom crash signal handler */ void FIO_addAbortHandler(void); +char const* FIO_zlibVersion(void); +char const* FIO_lz4Version(void); +char const* FIO_lzmaVersion(void); #if defined (__cplusplus) diff --git a/programs/fileio_asyncio.c b/programs/fileio_asyncio.c new file mode 100644 index 00000000000..fe9cca95d1f --- /dev/null +++ b/programs/fileio_asyncio.c @@ -0,0 +1,663 @@ +/* 
+ * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "platform.h" +#include /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */ +#include /* malloc, free */ +#include +#include /* errno */ + +#if defined (_MSC_VER) +# include +# include +#endif + +#include "fileio_asyncio.h" +#include "fileio_common.h" + +/* ********************************************************************** + * Sparse write + ************************************************************************/ + +/** AIO_fwriteSparse() : +* @return : storedSkips, +* argument for next call to AIO_fwriteSparse() or AIO_fwriteSparseEnd() */ +static unsigned +AIO_fwriteSparse(FILE* file, + const void* buffer, size_t bufferSize, + const FIO_prefs_t* const prefs, + unsigned storedSkips) +{ + const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ + size_t bufferSizeT = bufferSize / sizeof(size_t); + const size_t* const bufferTEnd = bufferT + bufferSizeT; + const size_t* ptrT = bufferT; + static const size_t segmentSizeT = (32 KB) / sizeof(size_t); /* check every 32 KB */ + + if (prefs->testMode) return 0; /* do not output anything in test mode */ + + if (!prefs->sparseFileSupport) { /* normal write */ + size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); + if (sizeCheck != bufferSize) + EXM_THROW(70, "Write error : cannot write block : %s", + strerror(errno)); + return 0; + } + + /* avoid int overflow */ + if (storedSkips > 1 GB) { + if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0) + EXM_THROW(91, "1 GB skip error (sparse file support)"); + storedSkips -= 1 GB; + } + + while (ptrT < bufferTEnd) { + size_t nb0T; + + /* adjust last segment if < 32 KB */ + size_t seg0SizeT = segmentSizeT; + if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; + bufferSizeT -= seg0SizeT; + + /* count leading zeroes */ + for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; + storedSkips += (unsigned)(nb0T * sizeof(size_t)); + + if (nb0T != seg0SizeT) { /* not all 0s */ + size_t const nbNon0ST = seg0SizeT - nb0T; + /* skip leading zeros */ + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + storedSkips = 0; + /* write the rest */ + if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST) + EXM_THROW(93, "Write error : cannot write block : %s", + strerror(errno)); + } + ptrT += seg0SizeT; + } + + { static size_t const maskT = sizeof(size_t)-1; + if (bufferSize & maskT) { + /* size not multiple of sizeof(size_t) : implies end of block */ + const char* const restStart = (const char*)bufferTEnd; + const char* restPtr = restStart; + const char* const restEnd = (const char*)buffer + bufferSize; + assert(restEnd > restStart && restEnd < restStart + sizeof(size_t)); + for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; + storedSkips += (unsigned) (restPtr - restStart); + if (restPtr != restEnd) { + /* not all remaining bytes are 0 */ + size_t const restSize = (size_t)(restEnd - restPtr); + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + if (fwrite(restPtr, 1, restSize, file) != restSize) + EXM_THROW(95, "Write 
error : cannot write end of decoded block : %s", + strerror(errno)); + storedSkips = 0; + } } } + + return storedSkips; +} + +static void +AIO_fwriteSparseEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) +{ + if (prefs->testMode) assert(storedSkips == 0); + if (storedSkips>0) { + assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ + (void)prefs; /* assert can be disabled, in which case prefs becomes unused */ + if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) + EXM_THROW(69, "Final skip error (sparse file support)"); + /* last zero must be explicitly written, + * so that skipped ones get implicitly translated as zero by FS */ + { const char lastZeroByte[1] = { 0 }; + if (fwrite(lastZeroByte, 1, 1, file) != 1) + EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); + } } +} + + +/* ********************************************************************** + * AsyncIO functionality + ************************************************************************/ + +/* AIO_supported: + * Returns 1 if AsyncIO is supported on the system, 0 otherwise. */ +int AIO_supported(void) { +#ifdef ZSTD_MULTITHREAD + return 1; +#else + return 0; +#endif +} + +/* *********************************** + * Generic IoPool implementation + *************************************/ + +static IOJob_t *AIO_IOPool_createIoJob(IOPoolCtx_t *ctx, size_t bufferSize) { + IOJob_t* const job = (IOJob_t*) malloc(sizeof(IOJob_t)); + void* const buffer = malloc(bufferSize); + if(!job || !buffer) + EXM_THROW(101, "Allocation error : not enough memory"); + job->buffer = buffer; + job->bufferSize = bufferSize; + job->usedBufferSize = 0; + job->file = NULL; + job->ctx = ctx; + job->offset = 0; + return job; +} + + +/* AIO_IOPool_createThreadPool: + * Creates a thread pool and a mutex for threaded IO pool. + * Displays warning if asyncio is requested but MT isn't available. */ +static void AIO_IOPool_createThreadPool(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs) { + ctx->threadPool = NULL; + ctx->threadPoolActive = 0; + if(prefs->asyncIO) { + if (ZSTD_pthread_mutex_init(&ctx->ioJobsMutex, NULL)) + EXM_THROW(102,"Failed creating ioJobsMutex mutex"); + /* We want MAX_IO_JOBS-2 queue items because we need to always have 1 free buffer to + * decompress into and 1 buffer that's actively written to disk and owned by the writing thread. */ + assert(MAX_IO_JOBS >= 2); + ctx->threadPool = POOL_create(1, MAX_IO_JOBS - 2); + ctx->threadPoolActive = 1; + if (!ctx->threadPool) + EXM_THROW(104, "Failed creating I/O thread pool"); + } +} + +/* AIO_IOPool_init: + * Allocates and sets and a new I/O thread pool including its included availableJobs. */ +static void AIO_IOPool_init(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs, POOL_function poolFunction, size_t bufferSize) { + int i; + AIO_IOPool_createThreadPool(ctx, prefs); + ctx->prefs = prefs; + ctx->poolFunction = poolFunction; + ctx->totalIoJobs = ctx->threadPool ? MAX_IO_JOBS : 2; + ctx->availableJobsCount = ctx->totalIoJobs; + for(i=0; i < ctx->availableJobsCount; i++) { + ctx->availableJobs[i] = AIO_IOPool_createIoJob(ctx, bufferSize); + } + ctx->jobBufferSize = bufferSize; + ctx->file = NULL; +} + + +/* AIO_IOPool_threadPoolActive: + * Check if current operation uses thread pool. + * Note that in some cases we have a thread pool initialized but choose not to use it. 
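 * (For context, a grounded example from this same patch: FIO_decompressSrcFile()
 * creates the pools once, then calls AIO_ReadPool_setAsync(ctx, 0) and
 * AIO_WritePool_setAsync(ctx, 0) for inputs smaller than 3 * ZSTD_BLOCKSIZE_MAX,
 * so the thread pool stays allocated but inactive and tiny files skip the
 * threading overhead; larger files re-enable it with setAsync(ctx, 1).)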
*/ +static int AIO_IOPool_threadPoolActive(IOPoolCtx_t* ctx) { + return ctx->threadPool && ctx->threadPoolActive; +} + + +/* AIO_IOPool_lockJobsMutex: + * Locks the IO jobs mutex if threading is active */ +static void AIO_IOPool_lockJobsMutex(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + ZSTD_pthread_mutex_lock(&ctx->ioJobsMutex); +} + +/* AIO_IOPool_unlockJobsMutex: + * Unlocks the IO jobs mutex if threading is active */ +static void AIO_IOPool_unlockJobsMutex(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + ZSTD_pthread_mutex_unlock(&ctx->ioJobsMutex); +} + +/* AIO_IOPool_releaseIoJob: + * Releases an acquired job back to the pool. Doesn't execute the job. */ +static void AIO_IOPool_releaseIoJob(IOJob_t* job) { + IOPoolCtx_t* const ctx = (IOPoolCtx_t *) job->ctx; + AIO_IOPool_lockJobsMutex(ctx); + assert(ctx->availableJobsCount < ctx->totalIoJobs); + ctx->availableJobs[ctx->availableJobsCount++] = job; + AIO_IOPool_unlockJobsMutex(ctx); +} + +/* AIO_IOPool_join: + * Waits for all tasks in the pool to finish executing. */ +static void AIO_IOPool_join(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + POOL_joinJobs(ctx->threadPool); +} + +/* AIO_IOPool_setThreaded: + * Allows (de)activating threaded mode, to be used when the expected overhead + * of threading costs more than the expected gains. */ +static void AIO_IOPool_setThreaded(IOPoolCtx_t* ctx, int threaded) { + assert(threaded == 0 || threaded == 1); + assert(ctx != NULL); + if(ctx->threadPoolActive != threaded) { + AIO_IOPool_join(ctx); + ctx->threadPoolActive = threaded; + } +} + +/* AIO_IOPool_free: + * Release a previously allocated IO thread pool. Makes sure all tasks are done and released. */ +static void AIO_IOPool_destroy(IOPoolCtx_t* ctx) { + int i; + if(ctx->threadPool) { + /* Make sure we finish all tasks and then free the resources */ + AIO_IOPool_join(ctx); + /* Make sure we are not leaking availableJobs */ + assert(ctx->availableJobsCount == ctx->totalIoJobs); + POOL_free(ctx->threadPool); + ZSTD_pthread_mutex_destroy(&ctx->ioJobsMutex); + } + assert(ctx->file == NULL); + for(i=0; iavailableJobsCount; i++) { + IOJob_t* job = (IOJob_t*) ctx->availableJobs[i]; + free(job->buffer); + free(job); + } +} + +/* AIO_IOPool_acquireJob: + * Returns an available io job to be used for a future io. */ +static IOJob_t* AIO_IOPool_acquireJob(IOPoolCtx_t* ctx) { + IOJob_t *job; + assert(ctx->file != NULL || ctx->prefs->testMode); + AIO_IOPool_lockJobsMutex(ctx); + assert(ctx->availableJobsCount > 0); + job = (IOJob_t*) ctx->availableJobs[--ctx->availableJobsCount]; + AIO_IOPool_unlockJobsMutex(ctx); + job->usedBufferSize = 0; + job->file = ctx->file; + job->offset = 0; + return job; +} + + +/* AIO_IOPool_setFile: + * Sets the destination file for future files in the pool. + * Requires completion of all queued jobs and release of all otherwise acquired jobs. */ +static void AIO_IOPool_setFile(IOPoolCtx_t* ctx, FILE* file) { + assert(ctx!=NULL); + AIO_IOPool_join(ctx); + assert(ctx->availableJobsCount == ctx->totalIoJobs); + ctx->file = file; +} + +static FILE* AIO_IOPool_getFile(const IOPoolCtx_t* ctx) { + return ctx->file; +} + +/* AIO_IOPool_enqueueJob: + * Enqueues an io job for execution. + * The queued job shouldn't be used directly after queueing it. 
*/ +static void AIO_IOPool_enqueueJob(IOJob_t* job) { + IOPoolCtx_t* const ctx = (IOPoolCtx_t *)job->ctx; + if(AIO_IOPool_threadPoolActive(ctx)) + POOL_add(ctx->threadPool, ctx->poolFunction, job); + else + ctx->poolFunction(job); +} + +/* *********************************** + * WritePool implementation + *************************************/ + +/* AIO_WritePool_acquireJob: + * Returns an available write job to be used for a future write. */ +IOJob_t* AIO_WritePool_acquireJob(WritePoolCtx_t* ctx) { + return AIO_IOPool_acquireJob(&ctx->base); +} + +/* AIO_WritePool_enqueueAndReacquireWriteJob: + * Queues a write job for execution and acquires a new one. + * After execution `job`'s pointed value would change to the newly acquired job. + * Make sure to set `usedBufferSize` to the wanted length before call. + * The queued job shouldn't be used directly after queueing it. */ +void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job) { + AIO_IOPool_enqueueJob(*job); + *job = AIO_IOPool_acquireJob((IOPoolCtx_t *)(*job)->ctx); +} + +/* AIO_WritePool_sparseWriteEnd: + * Ends sparse writes to the current file. + * Blocks on completion of all current write jobs before executing. */ +void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t* ctx) { + assert(ctx != NULL); + AIO_IOPool_join(&ctx->base); + AIO_fwriteSparseEnd(ctx->base.prefs, ctx->base.file, ctx->storedSkips); + ctx->storedSkips = 0; +} + +/* AIO_WritePool_setFile: + * Sets the destination file for future writes in the pool. + * Requires completion of all queues write jobs and release of all otherwise acquired jobs. + * Also requires ending of sparse write if a previous file was used in sparse mode. */ +void AIO_WritePool_setFile(WritePoolCtx_t* ctx, FILE* file) { + AIO_IOPool_setFile(&ctx->base, file); + assert(ctx->storedSkips == 0); +} + +/* AIO_WritePool_getFile: + * Returns the file the writePool is currently set to write to. */ +FILE* AIO_WritePool_getFile(const WritePoolCtx_t* ctx) { + return AIO_IOPool_getFile(&ctx->base); +} + +/* AIO_WritePool_releaseIoJob: + * Releases an acquired job back to the pool. Doesn't execute the job. */ +void AIO_WritePool_releaseIoJob(IOJob_t* job) { + AIO_IOPool_releaseIoJob(job); +} + +/* AIO_WritePool_closeFile: + * Ends sparse write and closes the writePool's current file and sets the file to NULL. + * Requires completion of all queues write jobs and release of all otherwise acquired jobs. */ +int AIO_WritePool_closeFile(WritePoolCtx_t* ctx) { + FILE* const dstFile = ctx->base.file; + assert(dstFile!=NULL || ctx->base.prefs->testMode!=0); + AIO_WritePool_sparseWriteEnd(ctx); + AIO_IOPool_setFile(&ctx->base, NULL); + return fclose(dstFile); +} + +/* AIO_WritePool_executeWriteJob: + * Executes a write job synchronously. Can be used as a function for a thread pool. */ +static void AIO_WritePool_executeWriteJob(void* opaque){ + IOJob_t* const job = (IOJob_t*) opaque; + WritePoolCtx_t* const ctx = (WritePoolCtx_t*) job->ctx; + ctx->storedSkips = AIO_fwriteSparse(job->file, job->buffer, job->usedBufferSize, ctx->base.prefs, ctx->storedSkips); + AIO_IOPool_releaseIoJob(job); +} + +/* AIO_WritePool_create: + * Allocates and sets and a new write pool including its included jobs. 
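 * A hedged usage sketch (illustrative, not part of the patch) of the cycle every
 * writer in this change follows once a pool exists and a file has been set:
 *     IOJob_t* job = AIO_WritePool_acquireJob(writeCtx);
 *     memcpy(job->buffer, src, n);                        // n <= job->bufferSize
 *     job->usedBufferSize = n;
 *     AIO_WritePool_enqueueAndReacquireWriteJob(&job);    // hands the job off, returns a fresh one
 *     ...
 *     AIO_WritePool_releaseIoJob(job);                    // once done producing
 *     AIO_WritePool_sparseWriteEnd(writeCtx);             // flush any pending sparse skip
 * Here src, n and writeCtx are assumed to come from the surrounding caller.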
*/ +WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize) { + WritePoolCtx_t* const ctx = (WritePoolCtx_t*) malloc(sizeof(WritePoolCtx_t)); + if(!ctx) EXM_THROW(100, "Allocation error : not enough memory"); + AIO_IOPool_init(&ctx->base, prefs, AIO_WritePool_executeWriteJob, bufferSize); + ctx->storedSkips = 0; + return ctx; +} + +/* AIO_WritePool_free: + * Frees and releases a writePool and its resources. Closes destination file if needs to. */ +void AIO_WritePool_free(WritePoolCtx_t* ctx) { + /* Make sure we finish all tasks and then free the resources */ + if(AIO_WritePool_getFile(ctx)) + AIO_WritePool_closeFile(ctx); + AIO_IOPool_destroy(&ctx->base); + assert(ctx->storedSkips==0); + free(ctx); +} + +/* AIO_WritePool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async) { + AIO_IOPool_setThreaded(&ctx->base, async); +} + + +/* *********************************** + * ReadPool implementation + *************************************/ +static void AIO_ReadPool_releaseAllCompletedJobs(ReadPoolCtx_t* ctx) { + int i; + for(i=0; icompletedJobsCount; i++) { + IOJob_t* job = (IOJob_t*) ctx->completedJobs[i]; + AIO_IOPool_releaseIoJob(job); + } + ctx->completedJobsCount = 0; +} + +static void AIO_ReadPool_addJobToCompleted(IOJob_t* job) { + ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx; + AIO_IOPool_lockJobsMutex(&ctx->base); + assert(ctx->completedJobsCount < MAX_IO_JOBS); + ctx->completedJobs[ctx->completedJobsCount++] = job; + if(AIO_IOPool_threadPoolActive(&ctx->base)) { + ZSTD_pthread_cond_signal(&ctx->jobCompletedCond); + } + AIO_IOPool_unlockJobsMutex(&ctx->base); +} + +/* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked: + * Looks through the completed jobs for a job matching the waitingOnOffset and returns it, + * if job wasn't found returns NULL. + * IMPORTANT: assumes ioJobsMutex is locked. */ +static IOJob_t* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ReadPoolCtx_t* ctx) { + IOJob_t *job = NULL; + int i; + /* This implementation goes through all completed jobs and looks for the one matching the next offset. + * While not strictly needed for a single threaded reader implementation (as in such a case we could expect + * reads to be completed in order) this implementation was chosen as it better fits other asyncio + * interfaces (such as io_uring) that do not provide promises regarding order of completion. */ + for (i=0; icompletedJobsCount; i++) { + job = (IOJob_t *) ctx->completedJobs[i]; + if (job->offset == ctx->waitingOnOffset) { + ctx->completedJobs[i] = ctx->completedJobs[--ctx->completedJobsCount]; + return job; + } + } + return NULL; +} + +/* AIO_ReadPool_numReadsInFlight: + * Returns the number of IO read jobs currently in flight. */ +static size_t AIO_ReadPool_numReadsInFlight(ReadPoolCtx_t* ctx) { + const size_t jobsHeld = (ctx->currentJobHeld==NULL ? 0 : 1); + return ctx->base.totalIoJobs - (ctx->base.availableJobsCount + ctx->completedJobsCount + jobsHeld); +} + +/* AIO_ReadPool_getNextCompletedJob: + * Returns a completed IOJob_t for the next read in line based on waitingOnOffset and advances waitingOnOffset. + * Would block. 
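 * For intuition, with illustrative numbers: when threaded, the pool owns
 * MAX_IO_JOBS (10) jobs; if 6 sit on availableJobs, 2 on completedJobs and 1 is
 * currentJobHeld, AIO_ReadPool_numReadsInFlight() reports 10 - (6 + 2 + 1) = 1,
 * so this function keeps waiting on jobCompletedCond while that outstanding read
 * is pending, and returns NULL once nothing is left in flight.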
*/ +static IOJob_t* AIO_ReadPool_getNextCompletedJob(ReadPoolCtx_t* ctx) { + IOJob_t *job = NULL; + AIO_IOPool_lockJobsMutex(&ctx->base); + + job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx); + + /* As long as we didn't find the job matching the next read, and we have some reads in flight continue waiting */ + while (!job && (AIO_ReadPool_numReadsInFlight(ctx) > 0)) { + assert(ctx->base.threadPool != NULL); /* we shouldn't be here if we work in sync mode */ + ZSTD_pthread_cond_wait(&ctx->jobCompletedCond, &ctx->base.ioJobsMutex); + job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx); + } + + if(job) { + assert(job->offset == ctx->waitingOnOffset); + ctx->waitingOnOffset += job->usedBufferSize; + } + + AIO_IOPool_unlockJobsMutex(&ctx->base); + return job; +} + + +/* AIO_ReadPool_executeReadJob: + * Executes a read job synchronously. Can be used as a function for a thread pool. */ +static void AIO_ReadPool_executeReadJob(void* opaque){ + IOJob_t* const job = (IOJob_t*) opaque; + ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx; + if(ctx->reachedEof) { + job->usedBufferSize = 0; + AIO_ReadPool_addJobToCompleted(job); + return; + } + job->usedBufferSize = fread(job->buffer, 1, job->bufferSize, job->file); + if(job->usedBufferSize < job->bufferSize) { + if(ferror(job->file)) { + EXM_THROW(37, "Read error"); + } else if(feof(job->file)) { + ctx->reachedEof = 1; + } else { + EXM_THROW(37, "Unexpected short read"); + } + } + AIO_ReadPool_addJobToCompleted(job); +} + +static void AIO_ReadPool_enqueueRead(ReadPoolCtx_t* ctx) { + IOJob_t* const job = AIO_IOPool_acquireJob(&ctx->base); + job->offset = ctx->nextReadOffset; + ctx->nextReadOffset += job->bufferSize; + AIO_IOPool_enqueueJob(job); +} + +static void AIO_ReadPool_startReading(ReadPoolCtx_t* ctx) { + int i; + for (i = 0; i < ctx->base.availableJobsCount; i++) { + AIO_ReadPool_enqueueRead(ctx); + } +} + +/* AIO_ReadPool_setFile: + * Sets the source file for future read in the pool. Initiates reading immediately if file is not NULL. + * Waits for all current enqueued tasks to complete if a previous file was set. */ +void AIO_ReadPool_setFile(ReadPoolCtx_t* ctx, FILE* file) { + assert(ctx!=NULL); + AIO_IOPool_join(&ctx->base); + AIO_ReadPool_releaseAllCompletedJobs(ctx); + if (ctx->currentJobHeld) { + AIO_IOPool_releaseIoJob((IOJob_t *)ctx->currentJobHeld); + ctx->currentJobHeld = NULL; + } + AIO_IOPool_setFile(&ctx->base, file); + ctx->nextReadOffset = 0; + ctx->waitingOnOffset = 0; + ctx->srcBuffer = ctx->coalesceBuffer; + ctx->srcBufferLoaded = 0; + ctx->reachedEof = 0; + if(file != NULL) + AIO_ReadPool_startReading(ctx); +} + +/* AIO_ReadPool_create: + * Allocates and sets and a new readPool including its included jobs. + * bufferSize should be set to the maximal buffer we want to read at a time, will also be used + * as our basic read size. 
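 * A possible creation/wiring sequence (illustrative only; it mirrors how fileio.c
 * uses the pools in this patch, but the local names and the ZSTD_DStream*Size()
 * buffer sizes are assumptions):
 *     ReadPoolCtx_t*  readCtx  = AIO_ReadPool_create(prefs, ZSTD_DStreamInSize());
 *     WritePoolCtx_t* writeCtx = AIO_WritePool_create(prefs, ZSTD_DStreamOutSize());
 *     AIO_ReadPool_setAsync(readCtx, smallInput ? 0 : 1);  // skip threading for tiny inputs
 *     AIO_ReadPool_setFile(readCtx, srcFile);              // starts prefetching right away
 *     AIO_WritePool_setFile(writeCtx, dstFile);
 *     ... decode frames ...
 *     AIO_ReadPool_setFile(readCtx, NULL);                 // detach; caller still fclose()s srcFile
 *     AIO_WritePool_closeFile(writeCtx);                   // flushes sparse end, fclose()s dstFile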
*/ +ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize) { + ReadPoolCtx_t* const ctx = (ReadPoolCtx_t*) malloc(sizeof(ReadPoolCtx_t)); + if(!ctx) EXM_THROW(100, "Allocation error : not enough memory"); + AIO_IOPool_init(&ctx->base, prefs, AIO_ReadPool_executeReadJob, bufferSize); + + ctx->coalesceBuffer = (U8*) malloc(bufferSize * 2); + ctx->srcBuffer = ctx->coalesceBuffer; + ctx->srcBufferLoaded = 0; + ctx->completedJobsCount = 0; + ctx->currentJobHeld = NULL; + + if(ctx->base.threadPool) + if (ZSTD_pthread_cond_init(&ctx->jobCompletedCond, NULL)) + EXM_THROW(103,"Failed creating jobCompletedCond cond"); + + return ctx; +} + +/* AIO_ReadPool_free: + * Frees and releases a readPool and its resources. Closes source file. */ +void AIO_ReadPool_free(ReadPoolCtx_t* ctx) { + if(AIO_ReadPool_getFile(ctx)) + AIO_ReadPool_closeFile(ctx); + if(ctx->base.threadPool) + ZSTD_pthread_cond_destroy(&ctx->jobCompletedCond); + AIO_IOPool_destroy(&ctx->base); + free(ctx->coalesceBuffer); + free(ctx); +} + +/* AIO_ReadPool_consumeBytes: + * Consumes byes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */ +void AIO_ReadPool_consumeBytes(ReadPoolCtx_t* ctx, size_t n) { + assert(n <= ctx->srcBufferLoaded); + ctx->srcBufferLoaded -= n; + ctx->srcBuffer += n; +} + +/* AIO_ReadPool_releaseCurrentlyHeldAndGetNext: + * Release the current held job and get the next one, returns NULL if no next job available. */ +static IOJob_t* AIO_ReadPool_releaseCurrentHeldAndGetNext(ReadPoolCtx_t* ctx) { + if (ctx->currentJobHeld) { + AIO_IOPool_releaseIoJob((IOJob_t *)ctx->currentJobHeld); + ctx->currentJobHeld = NULL; + AIO_ReadPool_enqueueRead(ctx); + } + ctx->currentJobHeld = AIO_ReadPool_getNextCompletedJob(ctx); + return (IOJob_t*) ctx->currentJobHeld; +} + +/* AIO_ReadPool_fillBuffer: + * Tries to fill the buffer with at least n or jobBufferSize bytes (whichever is smaller). + * Returns if srcBuffer has at least the expected number of bytes loaded or if we've reached the end of the file. + * Return value is the number of bytes added to the buffer. + * Note that srcBuffer might have up to 2 times jobBufferSize bytes. */ +size_t AIO_ReadPool_fillBuffer(ReadPoolCtx_t* ctx, size_t n) { + IOJob_t *job; + int useCoalesce = 0; + if(n > ctx->base.jobBufferSize) + n = ctx->base.jobBufferSize; + + /* We are good, don't read anything */ + if (ctx->srcBufferLoaded >= n) + return 0; + + /* We still have bytes loaded, but not enough to satisfy caller. We need to get the next job + * and coalesce the remaining bytes with the next job's buffer */ + if (ctx->srcBufferLoaded > 0) { + useCoalesce = 1; + memcpy(ctx->coalesceBuffer, ctx->srcBuffer, ctx->srcBufferLoaded); + ctx->srcBuffer = ctx->coalesceBuffer; + } + + /* Read the next chunk */ + job = AIO_ReadPool_releaseCurrentHeldAndGetNext(ctx); + if(!job) + return 0; + if(useCoalesce) { + assert(ctx->srcBufferLoaded + job->usedBufferSize <= 2*ctx->base.jobBufferSize); + memcpy(ctx->coalesceBuffer + ctx->srcBufferLoaded, job->buffer, job->usedBufferSize); + ctx->srcBufferLoaded += job->usedBufferSize; + } + else { + ctx->srcBuffer = (U8 *) job->buffer; + ctx->srcBufferLoaded = job->usedBufferSize; + } + return job->usedBufferSize; +} + +/* AIO_ReadPool_consumeAndRefill: + * Consumes the current buffer and refills it with bufferSize bytes. 
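 * By contrast, AIO_ReadPool_fillBuffer() keeps unconsumed bytes and only tops the
 * buffer up. A sketch of the header-probe pattern FIO_decompressFrames() uses in
 * this patch (error handling elided):
 *     AIO_ReadPool_fillBuffer(readCtx, 4);                 // want the 4 magic-number bytes
 *     if (readCtx->srcBufferLoaded < 4)
 *         ...                                              // too short: pass-through or report unknown header
 *     else if (ZSTD_isFrame(readCtx->srcBuffer, readCtx->srcBufferLoaded))
 *         ...                                              // decode a zstd frame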
*/ +size_t AIO_ReadPool_consumeAndRefill(ReadPoolCtx_t* ctx) { + AIO_ReadPool_consumeBytes(ctx, ctx->srcBufferLoaded); + return AIO_ReadPool_fillBuffer(ctx, ctx->base.jobBufferSize); +} + +/* AIO_ReadPool_getFile: + * Returns the current file set for the read pool. */ +FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t* ctx) { + return AIO_IOPool_getFile(&ctx->base); +} + +/* AIO_ReadPool_closeFile: + * Closes the current set file. Waits for all current enqueued tasks to complete and resets state. */ +int AIO_ReadPool_closeFile(ReadPoolCtx_t* ctx) { + FILE* const file = AIO_ReadPool_getFile(ctx); + AIO_ReadPool_setFile(ctx, NULL); + return fclose(file); +} + +/* AIO_ReadPool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async) { + AIO_IOPool_setThreaded(&ctx->base, async); +} diff --git a/programs/fileio_asyncio.h b/programs/fileio_asyncio.h new file mode 100644 index 00000000000..feb25a3f9e9 --- /dev/null +++ b/programs/fileio_asyncio.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + /* + * FileIO AsyncIO exposes read/write IO pools that allow doing IO asynchronously. + * Current implementation relies on having one thread that reads and one that + * writes. + * Each IO pool supports up to `MAX_IO_JOBS` that can be enqueued for work, but + * are performed serially by the appropriate worker thread. + * Most systems exposes better primitives to perform asynchronous IO, such as + * io_uring on newer linux systems. The API is built in such a way that in the + * future we could replace the threads with better solutions when available. + */ + +#ifndef ZSTD_FILEIO_ASYNCIO_H +#define ZSTD_FILEIO_ASYNCIO_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "../lib/common/mem.h" /* U32, U64 */ +#include "fileio_types.h" +#include "platform.h" +#include "util.h" +#include "../lib/common/pool.h" +#include "../lib/common/threading.h" + +#define MAX_IO_JOBS (10) + +typedef struct { + /* These struct fields should be set only on creation and not changed afterwards */ + POOL_ctx* threadPool; + int threadPoolActive; + int totalIoJobs; + const FIO_prefs_t* prefs; + POOL_function poolFunction; + + /* Controls the file we currently write to, make changes only by using provided utility functions */ + FILE* file; + + /* The jobs and availableJobsCount fields are accessed by both the main and worker threads and should + * only be mutated after locking the mutex */ + ZSTD_pthread_mutex_t ioJobsMutex; + void* availableJobs[MAX_IO_JOBS]; + int availableJobsCount; + size_t jobBufferSize; +} IOPoolCtx_t; + +typedef struct { + IOPoolCtx_t base; + + /* State regarding the currently read file */ + int reachedEof; + U64 nextReadOffset; + U64 waitingOnOffset; + + /* We may hold an IOJob object as needed if we actively expose its buffer. */ + void *currentJobHeld; + + /* Coalesce buffer is used to join two buffers in case where we need to read more bytes than left in + * the first of them. Shouldn't be accessed from outside ot utility functions. 
*/ + U8 *coalesceBuffer; + + /* Read buffer can be used by consumer code, take care when copying this pointer aside as it might + * change when consuming / refilling buffer. */ + U8 *srcBuffer; + size_t srcBufferLoaded; + + /* We need to know what tasks completed so we can use their buffers when their time comes. + * Should only be accessed after locking base.ioJobsMutex . */ + void* completedJobs[MAX_IO_JOBS]; + int completedJobsCount; + ZSTD_pthread_cond_t jobCompletedCond; +} ReadPoolCtx_t; + +typedef struct { + IOPoolCtx_t base; + unsigned storedSkips; +} WritePoolCtx_t; + +typedef struct { + /* These fields are automatically set and shouldn't be changed by non WritePool code. */ + void *ctx; + FILE* file; + void *buffer; + size_t bufferSize; + + /* This field should be changed before a job is queued for execution and should contain the number + * of bytes to write from the buffer. */ + size_t usedBufferSize; + U64 offset; +} IOJob_t; + +/* AIO_supported: + * Returns 1 if AsyncIO is supported on the system, 0 otherwise. */ +int AIO_supported(void); + + +/* AIO_WritePool_releaseIoJob: + * Releases an acquired job back to the pool. Doesn't execute the job. */ +void AIO_WritePool_releaseIoJob(IOJob_t *job); + +/* AIO_WritePool_acquireJob: + * Returns an available write job to be used for a future write. */ +IOJob_t* AIO_WritePool_acquireJob(WritePoolCtx_t *ctx); + +/* AIO_WritePool_enqueueAndReacquireWriteJob: + * Enqueues a write job for execution and acquires a new one. + * After execution `job`'s pointed value would change to the newly acquired job. + * Make sure to set `usedBufferSize` to the wanted length before call. + * The queued job shouldn't be used directly after queueing it. */ +void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job); + +/* AIO_WritePool_sparseWriteEnd: + * Ends sparse writes to the current file. + * Blocks on completion of all current write jobs before executing. */ +void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t *ctx); + +/* AIO_WritePool_setFile: + * Sets the destination file for future writes in the pool. + * Requires completion of all queues write jobs and release of all otherwise acquired jobs. + * Also requires ending of sparse write if a previous file was used in sparse mode. */ +void AIO_WritePool_setFile(WritePoolCtx_t *ctx, FILE* file); + +/* AIO_WritePool_getFile: + * Returns the file the writePool is currently set to write to. */ +FILE* AIO_WritePool_getFile(const WritePoolCtx_t* ctx); + +/* AIO_WritePool_closeFile: + * Ends sparse write and closes the writePool's current file and sets the file to NULL. + * Requires completion of all queues write jobs and release of all otherwise acquired jobs. */ +int AIO_WritePool_closeFile(WritePoolCtx_t *ctx); + +/* AIO_WritePool_create: + * Allocates and sets and a new write pool including its included jobs. + * bufferSize should be set to the maximal buffer we want to write to at a time. */ +WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize); + +/* AIO_WritePool_free: + * Frees and releases a writePool and its resources. Closes destination file. */ +void AIO_WritePool_free(WritePoolCtx_t* ctx); + +/* AIO_WritePool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async); + +/* AIO_ReadPool_create: + * Allocates and sets and a new readPool including its included jobs. 
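For the write side declared above, here is a sketch (not part of the patch) of the acquire/enqueue cycle the comments describe: fill an acquired job's buffer, set `usedBufferSize`, enqueue it, and continue with the freshly re-acquired job. The helper name, destination file, and payload are placeholders.

```c
/* Minimal producer sketch for WritePoolCtx_t, assuming an initialized FIO_prefs_t. */
#include <string.h>
#include "fileio_asyncio.h"

static void writeThreeBlocks(const FIO_prefs_t* prefs, FILE* dstFile)
{
    WritePoolCtx_t* const wp = AIO_WritePool_create(prefs, 128 * 1024);
    IOJob_t* job;
    int i;

    AIO_WritePool_setFile(wp, dstFile);
    job = AIO_WritePool_acquireJob(wp);
    for (i = 0; i < 3; i++) {
        memset(job->buffer, 0, job->bufferSize);          /* fill the job's buffer */
        job->usedBufferSize = job->bufferSize;            /* number of bytes to write */
        AIO_WritePool_enqueueAndReacquireWriteJob(&job);  /* queue it, get a fresh job back */
    }
    AIO_WritePool_releaseIoJob(job);                  /* last job was acquired but never queued */
    AIO_WritePool_closeFile(wp);                      /* drains queued writes, ends sparse mode */
    AIO_WritePool_free(wp);
}
```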
+ * bufferSize should be set to the maximal buffer we want to read at a time, will also be used + * as our basic read size. */ +ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize); + +/* AIO_ReadPool_free: + * Frees and releases a readPool and its resources. Closes source file. */ +void AIO_ReadPool_free(ReadPoolCtx_t* ctx); + +/* AIO_ReadPool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async); + +/* AIO_ReadPool_consumeBytes: + * Consumes byes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */ +void AIO_ReadPool_consumeBytes(ReadPoolCtx_t *ctx, size_t n); + +/* AIO_ReadPool_fillBuffer: + * Makes sure buffer has at least n bytes loaded (as long as n is not bigger than the initialized bufferSize). + * Returns if srcBuffer has at least n bytes loaded or if we've reached the end of the file. + * Return value is the number of bytes added to the buffer. + * Note that srcBuffer might have up to 2 times bufferSize bytes. */ +size_t AIO_ReadPool_fillBuffer(ReadPoolCtx_t *ctx, size_t n); + +/* AIO_ReadPool_consumeAndRefill: + * Consumes the current buffer and refills it with bufferSize bytes. */ +size_t AIO_ReadPool_consumeAndRefill(ReadPoolCtx_t *ctx); + +/* AIO_ReadPool_setFile: + * Sets the source file for future read in the pool. Initiates reading immediately if file is not NULL. + * Waits for all current enqueued tasks to complete if a previous file was set. */ +void AIO_ReadPool_setFile(ReadPoolCtx_t *ctx, FILE* file); + +/* AIO_ReadPool_getFile: + * Returns the current file set for the read pool. */ +FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t *ctx); + +/* AIO_ReadPool_closeFile: + * Closes the current set file. Waits for all current enqueued tasks to complete and resets state. */ +int AIO_ReadPool_closeFile(ReadPoolCtx_t *ctx); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_FILEIO_ASYNCIO_H */ diff --git a/programs/fileio_common.h b/programs/fileio_common.h new file mode 100644 index 00000000000..55491b8e328 --- /dev/null +++ b/programs/fileio_common.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_FILEIO_COMMON_H +#define ZSTD_FILEIO_COMMON_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "../lib/common/mem.h" /* U32, U64 */ +#include "fileio_types.h" +#include "platform.h" +#include "timefn.h" /* UTIL_getTime, UTIL_clockSpanMicro */ + +/*-************************************* +* Macros +***************************************/ +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) +#undef MAX +#define MAX(a,b) ((a)>(b) ? (a) : (b)) + +extern FIO_display_prefs_t g_display_prefs; + +#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) +#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) +#define DISPLAYLEVEL(l, ...) 
{ if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } } + +extern UTIL_time_t g_displayClock; + +#define REFRESH_RATE ((U64)(SEC_TO_MICRO / 6)) +#define READY_FOR_UPDATE() (UTIL_clockSpanMicro(g_displayClock) > REFRESH_RATE || g_display_prefs.displayLevel >= 4) +#define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); } +#define DISPLAYUPDATE(l, ...) { \ + if (g_display_prefs.displayLevel>=l && (g_display_prefs.progressSetting != FIO_ps_never)) { \ + if (READY_FOR_UPDATE()) { \ + DELAY_NEXT_UPDATE(); \ + DISPLAY(__VA_ARGS__); \ + if (g_display_prefs.displayLevel>=4) fflush(stderr); \ + } } } + +#define SHOULD_DISPLAY_SUMMARY() \ + (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) +#define SHOULD_DISPLAY_PROGRESS() \ + (g_display_prefs.progressSetting != FIO_ps_never && SHOULD_DISPLAY_SUMMARY()) +#define DISPLAY_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYLEVEL(1, __VA_ARGS__); }} +#define DISPLAYUPDATE_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYUPDATE(1, __VA_ARGS__); }} +#define DISPLAY_SUMMARY(...) { if (SHOULD_DISPLAY_SUMMARY()) { DISPLAYLEVEL(1, __VA_ARGS__); } } + +#undef MIN /* in case it would be already defined */ +#define MIN(a,b) ((a) < (b) ? (a) : (b)) + + +#define EXM_THROW(error, ...) \ +{ \ + DISPLAYLEVEL(1, "zstd: "); \ + DISPLAYLEVEL(5, "Error defined at %s, line %i : \n", __FILE__, __LINE__); \ + DISPLAYLEVEL(1, "error %i : ", error); \ + DISPLAYLEVEL(1, __VA_ARGS__); \ + DISPLAYLEVEL(1, " \n"); \ + exit(error); \ +} + +#define CHECK_V(v, f) \ + v = f; \ + if (ZSTD_isError(v)) { \ + DISPLAYLEVEL(5, "%s \n", #f); \ + EXM_THROW(11, "%s", ZSTD_getErrorName(v)); \ + } +#define CHECK(f) { size_t err; CHECK_V(err, f); } + + +/* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW */ +#if defined(_MSC_VER) && _MSC_VER >= 1400 +# define LONG_SEEK _fseeki64 +# define LONG_TELL _ftelli64 +#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ +# define LONG_SEEK fseeko +# define LONG_TELL ftello +#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) +# define LONG_SEEK fseeko64 +# define LONG_TELL ftello64 +#elif defined(_WIN32) && !defined(__DJGPP__) +# include + static int LONG_SEEK(FILE* file, __int64 offset, int origin) { + LARGE_INTEGER off; + DWORD method; + off.QuadPart = offset; + if (origin == SEEK_END) + method = FILE_END; + else if (origin == SEEK_CUR) + method = FILE_CURRENT; + else + method = FILE_BEGIN; + + if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method)) + return 0; + else + return -1; + } + static __int64 LONG_TELL(FILE* file) { + LARGE_INTEGER off, newOff; + off.QuadPart = 0; + newOff.QuadPart = 0; + SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, &newOff, FILE_CURRENT); + return newOff.QuadPart; + } +#else +# define LONG_SEEK fseek +# define LONG_TELL ftell +#endif + +#if defined (__cplusplus) +} +#endif +#endif /* ZSTD_FILEIO_COMMON_H */ diff --git a/programs/fileio_types.h b/programs/fileio_types.h new file mode 100644 index 00000000000..c1f42f1ad0b --- /dev/null +++ b/programs/fileio_types.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
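The helpers in fileio_common.h are used throughout the fileio code. Two conventions are worth calling out: `KB`/`MB`/`GB` are postfix multipliers (so `8 MB` expands to `8 *(1 <<20)`), and `EXM_THROW`/`CHECK_V` print an error and `exit()` the process. The sketch below (not part of the patch) shows them together; the wrapper function and its arguments are hypothetical.

```c
/* Hypothetical wrapper demonstrating the fileio_common.h helpers. */
#include <stdlib.h>
#include "fileio_common.h"
#include "../lib/zstd.h"

static size_t compressOnce(ZSTD_CCtx* cctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    size_t csize;
    if (srcSize > 8 MB)                                  /* postfix macro: 8 *(1 <<20) */
        EXM_THROW(42, "sample larger than %u bytes", 8u MB);
    CHECK_V(csize, ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize));  /* exits on error */
    DISPLAYLEVEL(4, "compressed %u -> %u bytes \n", (unsigned)srcSize, (unsigned)csize);
    return csize;
}
```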
+ * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef FILEIO_TYPES_HEADER +#define FILEIO_TYPES_HEADER + +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ +#include "../lib/zstd.h" /* ZSTD_* */ + +/*-************************************* +* Parameters: FIO_prefs_t +***************************************/ + +typedef struct FIO_display_prefs_s FIO_display_prefs_t; + +typedef enum { FIO_ps_auto, FIO_ps_never, FIO_ps_always } FIO_progressSetting_e; + +struct FIO_display_prefs_s { + int displayLevel; /* 0 : no display; 1: errors; 2: + result + interaction + warnings; 3: + progression; 4: + information */ + FIO_progressSetting_e progressSetting; +}; + + +typedef enum { FIO_zstdCompression, FIO_gzipCompression, FIO_xzCompression, FIO_lzmaCompression, FIO_lz4Compression } FIO_compressionType_t; + +typedef struct FIO_prefs_s { + + /* Algorithm preferences */ + FIO_compressionType_t compressionType; + int sparseFileSupport; /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */ + int dictIDFlag; + int checksumFlag; + int blockSize; + int overlapLog; + int adaptiveMode; + int useRowMatchFinder; + int rsyncable; + int minAdaptLevel; + int maxAdaptLevel; + int ldmFlag; + int ldmHashLog; + int ldmMinMatch; + int ldmBucketSizeLog; + int ldmHashRateLog; + size_t streamSrcSize; + size_t targetCBlockSize; + int srcSizeHint; + int testMode; + ZSTD_paramSwitch_e literalCompressionMode; + + /* IO preferences */ + int removeSrcFile; + int overwrite; + int asyncIO; + + /* Computation resources preferences */ + unsigned memLimit; + int nbWorkers; + + int excludeCompressedFiles; + int patchFromMode; + int contentSize; + int allowBlockDevices; + int passThrough; +} FIO_prefs_t; + +#endif /* FILEIO_TYPES_HEADER */ diff --git a/programs/platform.h b/programs/platform.h index b858e3b484c..18a3587bfe2 100644 --- a/programs/platform.h +++ b/programs/platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -33,7 +33,7 @@ extern "C" { /* ************************************** * Detect 64-bit OS -* http://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros +* https://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros ****************************************/ #if defined __ia64 || defined _M_IA64 /* Intel Itanium */ \ || defined __powerpc64__ || defined __ppc64__ || defined __PPC64__ /* POWER 64-bit */ \ @@ -80,7 +80,7 @@ extern "C" { * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */ # define PLATFORM_POSIX_VERSION 200112L -/* try to determine posix version through official unistd.h's _POSIX_VERSION (http://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). +/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). * note : there is no simple way to know in advance if is present or not on target system, * Posix specification mandates its presence and its content, but target system must respect this spec. 
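fileio_types.h centralizes the preference struct that the rest of the fileio code consults. As a small, hedged example (the helper name and policy are assumptions, not part of the patch), this is how the `asyncIO` preference could be wired to the new pools:

```c
/* Sketch: apply the asyncIO preference to both IO pools. */
#include "fileio_types.h"
#include "fileio_asyncio.h"

static void configurePools(const FIO_prefs_t* prefs,
                           ReadPoolCtx_t* rp, WritePoolCtx_t* wp)
{
    /* Only pay the worker-thread overhead when async IO is requested and supported */
    int const useAsync = prefs->asyncIO && AIO_supported();
    AIO_ReadPool_setAsync(rp, useAsync);
    AIO_WritePool_setAsync(wp, useAsync);
}
```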
* It's necessary to _not_ #include whenever target OS is not unix-like @@ -127,6 +127,10 @@ extern "C" { /*-********************************************* * Detect if isatty() and fileno() are available +* +* Note: Use UTIL_isConsole() for the zstd CLI +* instead, as it allows faking is console for +* testing. ************************************************/ #if (defined(__linux__) && (PLATFORM_POSIX_VERSION > 1)) \ || (PLATFORM_POSIX_VERSION >= 200112L) \ @@ -192,13 +196,13 @@ static __inline int IS_CONSOLE(FILE* stdStream) { #ifndef ZSTD_SETPRIORITY_SUPPORT - /* mandates presence of and support for setpriority() : http://man7.org/linux/man-pages/man2/setpriority.2.html */ + /* mandates presence of and support for setpriority() : https://man7.org/linux/man-pages/man2/setpriority.2.html */ # define ZSTD_SETPRIORITY_SUPPORT (PLATFORM_POSIX_VERSION >= 200112L) #endif #ifndef ZSTD_NANOSLEEP_SUPPORT - /* mandates support of nanosleep() within : http://man7.org/linux/man-pages/man2/nanosleep.2.html */ + /* mandates support of nanosleep() within : https://man7.org/linux/man-pages/man2/nanosleep.2.html */ # if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 199309L)) \ || (PLATFORM_POSIX_VERSION >= 200112L) # define ZSTD_NANOSLEEP_SUPPORT 1 diff --git a/programs/timefn.c b/programs/timefn.c index 64577b0e932..f941e57e61a 100644 --- a/programs/timefn.c +++ b/programs/timefn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,8 @@ /* === Dependencies === */ #include "timefn.h" - +#include "platform.h" /* set _POSIX_C_SOURCE */ +#include /* CLOCK_MONOTONIC, TIME_UTC */ /*-**************************************** * Time functions @@ -20,12 +21,11 @@ #if defined(_WIN32) /* Windows */ +#include /* LARGE_INTEGER */ #include /* abort */ #include /* perror */ -UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; } - -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) +UTIL_time_t UTIL_getTime(void) { static LARGE_INTEGER ticksPerSecond; static int init = 0; @@ -36,30 +36,20 @@ PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) } init = 1; } - return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; -} - -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) -{ - static LARGE_INTEGER ticksPerSecond; - static int init = 0; - if (!init) { - if (!QueryPerformanceFrequency(&ticksPerSecond)) { - perror("timefn::QueryPerformanceFrequency"); - abort(); - } - init = 1; + { UTIL_time_t r; + LARGE_INTEGER x; + QueryPerformanceCounter(&x); + r.t = (PTime)(x.QuadPart * 1000000000ULL / ticksPerSecond.QuadPart); + return r; } - return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; } - #elif defined(__APPLE__) && defined(__MACH__) -UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); } +#include /* mach_timebase_info_data_t, mach_timebase_info, mach_absolute_time */ -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) +UTIL_time_t UTIL_getTime(void) { static mach_timebase_info_data_t rate; static int init = 0; @@ -67,23 +57,39 @@ PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) mach_timebase_info(&rate); init = 1; } - return (((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom))/1000ULL; + { UTIL_time_t r; + 
r.t = mach_absolute_time() * (PTime)rate.numer / (PTime)rate.denom; + return r; + } } -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) +/* POSIX.1-2001 (optional) */ +#elif defined(CLOCK_MONOTONIC) + +#include /* abort */ +#include /* perror */ + +UTIL_time_t UTIL_getTime(void) { - static mach_timebase_info_data_t rate; - static int init = 0; - if (!init) { - mach_timebase_info(&rate); - init = 1; + /* time must be initialized, othersize it may fail msan test. + * No good reason, likely a limitation of timespec_get() for some target */ + struct timespec time = { 0, 0 }; + if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) { + perror("timefn::clock_gettime(CLOCK_MONOTONIC)"); + abort(); + } + { UTIL_time_t r; + r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec; + return r; } - return ((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom); } -/* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance. - Android also lacks it but does define TIME_UTC. */ +/* C11 requires support of timespec_get(). + * However, FreeBSD 11 claims C11 compliance while lacking timespec_get(). + * Double confirm timespec_get() support by checking the definition of TIME_UTC. + * However, some versions of Android manage to simultanously define TIME_UTC + * and lack timespec_get() support... */ #elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \ && defined(TIME_UTC) && !defined(__ANDROID__) @@ -94,65 +100,49 @@ UTIL_time_t UTIL_getTime(void) { /* time must be initialized, othersize it may fail msan test. * No good reason, likely a limitation of timespec_get() for some target */ - UTIL_time_t time = UTIL_TIME_INITIALIZER; + struct timespec time = { 0, 0 }; if (timespec_get(&time, TIME_UTC) != TIME_UTC) { - perror("timefn::timespec_get"); + perror("timefn::timespec_get(TIME_UTC)"); abort(); } - return time; -} - -static UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end) -{ - UTIL_time_t diff; - if (end.tv_nsec < begin.tv_nsec) { - diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec; - diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec; - } else { - diff.tv_sec = end.tv_sec - begin.tv_sec; - diff.tv_nsec = end.tv_nsec - begin.tv_nsec; + { UTIL_time_t r; + r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec; + return r; } - return diff; } -PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end) -{ - UTIL_time_t const diff = UTIL_getSpanTime(begin, end); - PTime micro = 0; - micro += 1000000ULL * diff.tv_sec; - micro += diff.tv_nsec / 1000ULL; - return micro; -} -PTime UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end) +#else /* relies on standard C90 (note : clock_t produces wrong measurements for multi-threaded workloads) */ + +UTIL_time_t UTIL_getTime(void) { - UTIL_time_t const diff = UTIL_getSpanTime(begin, end); - PTime nano = 0; - nano += 1000000000ULL * diff.tv_sec; - nano += diff.tv_nsec; - return nano; + UTIL_time_t r; + r.t = (PTime)clock() * 1000000000ULL / CLOCKS_PER_SEC; + return r; } - - -#else /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */ - -UTIL_time_t UTIL_getTime(void) { return clock(); } -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } +#define 
TIME_MT_MEASUREMENTS_NOT_SUPPORTED #endif +/* ==== Common functions, valid for all time API ==== */ +PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) +{ + return clockEnd.t - clockStart.t; +} + +PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end) +{ + return UTIL_getSpanTimeNano(begin, end) / 1000ULL; +} -/* returns time span in microseconds */ PTime UTIL_clockSpanMicro(UTIL_time_t clockStart ) { UTIL_time_t const clockEnd = UTIL_getTime(); return UTIL_getSpanTimeMicro(clockStart, clockEnd); } -/* returns time span in microseconds */ PTime UTIL_clockSpanNano(UTIL_time_t clockStart ) { UTIL_time_t const clockEnd = UTIL_getTime(); @@ -167,3 +157,12 @@ void UTIL_waitForNextTick(void) clockEnd = UTIL_getTime(); } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0); } + +int UTIL_support_MT_measurements(void) +{ +# if defined(TIME_MT_MEASUREMENTS_NOT_SUPPORTED) + return 0; +# else + return 1; +# endif +} diff --git a/programs/timefn.h b/programs/timefn.h index 3fcd78a28ec..b814ff8d8da 100644 --- a/programs/timefn.h +++ b/programs/timefn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,70 +16,51 @@ extern "C" { #endif -/*-**************************************** -* Dependencies -******************************************/ -#include /* clock_t, clock, CLOCKS_PER_SEC */ - - /*-**************************************** -* Local Types +* Types ******************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # if defined(_AIX) # include # else -# include /* intptr_t */ +# include /* uint64_t */ # endif typedef uint64_t PTime; /* Precise Time */ #else typedef unsigned long long PTime; /* does not support compilers without long long support */ #endif +/* UTIL_time_t contains a nanosecond time counter. + * The absolute value is not meaningful. + * It's only valid to compute the difference between 2 measurements. */ +typedef struct { PTime t; } UTIL_time_t; +#define UTIL_TIME_INITIALIZER { 0 } /*-**************************************** * Time functions ******************************************/ -#if defined(_WIN32) /* Windows */ - - #include /* LARGE_INTEGER */ - typedef LARGE_INTEGER UTIL_time_t; - #define UTIL_TIME_INITIALIZER { { 0, 0 } } - -#elif defined(__APPLE__) && defined(__MACH__) - - #include - typedef PTime UTIL_time_t; - #define UTIL_TIME_INITIALIZER 0 - -/* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance. - Android also lacks it but does define TIME_UTC. */ -#elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \ - && defined(TIME_UTC) && !defined(__ANDROID__) - - typedef struct timespec UTIL_time_t; - #define UTIL_TIME_INITIALIZER { 0, 0 } - -#else /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */ - typedef clock_t UTIL_time_t; - #define UTIL_TIME_INITIALIZER 0 - -#endif +UTIL_time_t UTIL_getTime(void); +/* Timer resolution can be low on some platforms. + * To improve accuracy, it's recommended to wait for a new tick + * before starting benchmark measurements */ +void UTIL_waitForNextTick(void); +/* tells if timefn will return correct time measurements + * in presence of multi-threaded workload. 
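With UTIL_time_t reduced to a single nanosecond counter, all platforms share the span helpers above. A small usage sketch follows (not part of the patch; the measured callback is a placeholder):

```c
/* Sketch: timing a function with the unified timefn API. */
#include <stdio.h>
#include "timefn.h"

static void benchOnce(void (*fn)(void))
{
    UTIL_time_t start;
    UTIL_waitForNextTick();                    /* helps on low-resolution clocks */
    start = UTIL_getTime();
    fn();
    {   PTime const ns = UTIL_clockSpanNano(start);
        printf("elapsed: %llu ns (%llu us)\n",
               (unsigned long long)ns, (unsigned long long)(ns / 1000));
    }
    if (!UTIL_support_MT_measurements())
        printf("note: C90 clock() fallback; timings unreliable for multi-threaded work\n");
}
```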
+ * note : this is not the case if only C90 clock_t measurements are available */ +int UTIL_support_MT_measurements(void); -UTIL_time_t UTIL_getTime(void); -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd); PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd); +PTime UTIL_clockSpanNano(UTIL_time_t clockStart); -#define SEC_TO_MICRO ((PTime)1000000) +PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd); PTime UTIL_clockSpanMicro(UTIL_time_t clockStart); -PTime UTIL_clockSpanNano(UTIL_time_t clockStart); -void UTIL_waitForNextTick(void); +#define SEC_TO_MICRO ((PTime)1000000) /* nb of microseconds in a second */ #if defined (__cplusplus) diff --git a/programs/util.c b/programs/util.c index d69b72a37ca..e017772ef6e 100644 --- a/programs/util.c +++ b/programs/util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -66,6 +66,27 @@ extern "C" { #define UTIL_DISPLAY(...) fprintf(stderr, __VA_ARGS__) #define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } } +static int g_traceDepth = 0; +int g_traceFileStat = 0; + +#define UTIL_TRACE_CALL(...) \ + { \ + if (g_traceFileStat) { \ + UTIL_DISPLAY("Trace:FileStat: %*s> ", g_traceDepth, ""); \ + UTIL_DISPLAY(__VA_ARGS__); \ + UTIL_DISPLAY("\n"); \ + ++g_traceDepth; \ + } \ + } + +#define UTIL_TRACE_RET(ret) \ + { \ + if (g_traceFileStat) { \ + --g_traceDepth; \ + UTIL_DISPLAY("Trace:FileStat: %*s< %d\n", g_traceDepth, "", (ret)); \ + } \ + } + /* A modified version of realloc(). * If UTIL_realloc() fails the original block is freed. 
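The UTIL_TRACE_CALL/UTIL_TRACE_RET pair added above indents nested calls by one column per level. As a hedged illustration (the file name and return values are placeholders), enabling tracing and calling one wrapped helper produces output along these lines:

```c
/* Sketch: expected shape of the FileStat trace output. */
#include "util.h"

void demoTracing(void)
{
    UTIL_traceFileStat();                       /* turn on g_traceFileStat */
    (void)UTIL_isRegularFile("example.txt");
    /* Expected on stderr (indentation shows nesting depth):
     *   Trace:FileStat: > UTIL_isRegularFile(example.txt)
     *   Trace:FileStat:  > UTIL_stat(example.txt)
     *   Trace:FileStat:  < 1
     *   Trace:FileStat: < 1
     */
}
```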
*/ @@ -100,7 +121,7 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, ch = getchar(); result = 0; if (strchr(acceptableLetters, ch) == NULL) { - UTIL_DISPLAY("%s", abortMsg); + UTIL_DISPLAY("%s \n", abortMsg); result = 1; } /* flush the rest */ @@ -121,21 +142,34 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, * Functions ***************************************/ +void UTIL_traceFileStat(void) +{ + g_traceFileStat = 1; +} + int UTIL_stat(const char* filename, stat_t* statbuf) { + int ret; + UTIL_TRACE_CALL("UTIL_stat(%s)", filename); #if defined(_MSC_VER) - return !_stat64(filename, statbuf); + ret = !_stat64(filename, statbuf); #elif defined(__MINGW32__) && defined (__MSVCRT__) - return !_stati64(filename, statbuf); + ret = !_stati64(filename, statbuf); #else - return !stat(filename, statbuf); + ret = !stat(filename, statbuf); #endif + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isRegularFile(const char* infilename) { stat_t statbuf; - return UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + int ret; + UTIL_TRACE_CALL("UTIL_isRegularFile(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isRegularFileStat(const stat_t* statbuf) @@ -151,71 +185,114 @@ int UTIL_isRegularFileStat(const stat_t* statbuf) int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions) { stat_t localStatBuf; + UTIL_TRACE_CALL("UTIL_chmod(%s, %#4o)", filename, (unsigned)permissions); if (statbuf == NULL) { - if (!UTIL_stat(filename, &localStatBuf)) return 0; + if (!UTIL_stat(filename, &localStatBuf)) { + UTIL_TRACE_RET(0); + return 0; + } statbuf = &localStatBuf; } - if (!UTIL_isRegularFileStat(statbuf)) return 0; /* pretend success, but don't change anything */ - return chmod(filename, permissions); + if (!UTIL_isRegularFileStat(statbuf)) { + UTIL_TRACE_RET(0); + return 0; /* pretend success, but don't change anything */ + } + UTIL_TRACE_CALL("chmod"); + { + int const ret = chmod(filename, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } } /* set access and modification times */ int UTIL_utime(const char* filename, const stat_t *statbuf) { int ret; + UTIL_TRACE_CALL("UTIL_utime(%s)", filename); /* We check that st_mtime is a macro here in order to give us confidence * that struct stat has a struct timespec st_mtim member. We need this * check because there are some platforms that claim to be POSIX 2008 * compliant but which do not have st_mtim... 
*/ #if (PLATFORM_POSIX_VERSION >= 200809L) && defined(st_mtime) - /* (atime, mtime) */ - struct timespec timebuf[2] = { {0, UTIME_NOW} }; - timebuf[1] = statbuf->st_mtim; - ret = utimensat(AT_FDCWD, filename, timebuf, 0); + { + /* (atime, mtime) */ + struct timespec timebuf[2] = { {0, UTIME_NOW} }; + timebuf[1] = statbuf->st_mtim; + ret = utimensat(AT_FDCWD, filename, timebuf, 0); + } #else - struct utimbuf timebuf; - timebuf.actime = time(NULL); - timebuf.modtime = statbuf->st_mtime; - ret = utime(filename, &timebuf); + { + struct utimbuf timebuf; + timebuf.actime = time(NULL); + timebuf.modtime = statbuf->st_mtime; + ret = utime(filename, &timebuf); + } #endif errno = 0; + UTIL_TRACE_RET(ret); return ret; } int UTIL_setFileStat(const char *filename, const stat_t *statbuf) { int res = 0; - stat_t curStatBuf; - if (!UTIL_stat(filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) + UTIL_TRACE_CALL("UTIL_setFileStat(%s)", filename); + + if (!UTIL_stat(filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) { + UTIL_TRACE_RET(-1); return -1; + } /* set access and modification times */ res += UTIL_utime(filename, statbuf); + /* Mimic gzip's behavior: + * + * "Change the group first, then the permissions, then the owner. + * That way, the permissions will be correct on systems that allow + * users to give away files, without introducing a security hole. + * Security depends on permissions not containing the setuid or + * setgid bits." */ + #if !defined(_WIN32) - res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ + res += chown(filename, -1, statbuf->st_gid); /* Apply group ownership */ #endif - res += UTIL_chmod(filename, &curStatBuf, statbuf->st_mode & 07777); /* Copy file permissions */ + res += UTIL_chmod(filename, &curStatBuf, statbuf->st_mode & 0777); /* Copy file permissions */ + +#if !defined(_WIN32) + res += chown(filename, statbuf->st_uid, -1); /* Apply user ownership */ +#endif errno = 0; + UTIL_TRACE_RET(-res); return -res; /* number of errors is returned */ } int UTIL_isDirectory(const char* infilename) { stat_t statbuf; - return UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + int ret; + UTIL_TRACE_CALL("UTIL_isDirectory(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isDirectoryStat(const stat_t* statbuf) { + int ret; + UTIL_TRACE_CALL("UTIL_isDirectoryStat()"); #if defined(_MSC_VER) - return (statbuf->st_mode & _S_IFDIR) != 0; + ret = (statbuf->st_mode & _S_IFDIR) != 0; #else - return S_ISDIR(statbuf->st_mode) != 0; + ret = S_ISDIR(statbuf->st_mode) != 0; #endif + UTIL_TRACE_RET(ret); + return ret; } int UTIL_compareStr(const void *p1, const void *p2) { @@ -224,33 +301,68 @@ int UTIL_compareStr(const void *p1, const void *p2) { int UTIL_isSameFile(const char* fName1, const char* fName2) { + int ret; assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFile(%s, %s)", fName1, fName2); #if defined(_MSC_VER) || defined(_WIN32) /* note : Visual does not support file identification by inode. * inode does not work on Windows, even with a posix layer, like msys2. 
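Two details of the UTIL_setFileStat() change above are easy to miss: ownership is now restored in gzip's order (group, then permissions, then owner), and the permission mask shrinks from 07777 to 0777. The note below (not part of the patch) spells out what the narrower mask drops:

```c
/* Bit breakdown of the permission masks used by UTIL_setFileStat(). */
#include <sys/stat.h>
/* 07777 = 04000 (S_ISUID) | 02000 (S_ISGID) | 01000 (S_ISVTX) | 0777 (rwx for u/g/o) */
/* 0777  = permission bits only; setuid/setgid/sticky are never copied to the output file */
```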
* The following work-around is limited to detecting exact name repetition only, * aka `filename` is considered different from `subdir/../filename` */ - return !strcmp(fName1, fName2); + ret = !strcmp(fName1, fName2); #else { stat_t file1Stat; stat_t file2Stat; - return UTIL_stat(fName1, &file1Stat) + ret = UTIL_stat(fName1, &file1Stat) && UTIL_stat(fName2, &file2Stat) - && (file1Stat.st_dev == file2Stat.st_dev) - && (file1Stat.st_ino == file2Stat.st_ino); + && UTIL_isSameFileStat(fName1, fName2, &file1Stat, &file2Stat); } #endif + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_isSameFileStat( + const char* fName1, const char* fName2, + const stat_t* file1Stat, const stat_t* file2Stat) +{ + int ret; + assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFileStat(%s, %s)", fName1, fName2); +#if defined(_MSC_VER) || defined(_WIN32) + /* note : Visual does not support file identification by inode. + * inode does not work on Windows, even with a posix layer, like msys2. + * The following work-around is limited to detecting exact name repetition only, + * aka `filename` is considered different from `subdir/../filename` */ + (void)file1Stat; + (void)file2Stat; + ret = !strcmp(fName1, fName2); +#else + { + ret = (file1Stat->st_dev == file2Stat->st_dev) + && (file1Stat->st_ino == file2Stat->st_ino); + } +#endif + UTIL_TRACE_RET(ret); + return ret; } /* UTIL_isFIFO : distinguish named pipes */ int UTIL_isFIFO(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isFIFO(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - stat_t statbuf; - if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) return 1; + { + stat_t statbuf; + if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } @@ -278,21 +390,69 @@ int UTIL_isBlockDevStat(const stat_t* statbuf) int UTIL_isLink(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isLink(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - stat_t statbuf; - int const r = lstat(infilename, &statbuf); - if (!r && S_ISLNK(statbuf.st_mode)) return 1; + { + stat_t statbuf; + int const r = lstat(infilename, &statbuf); + if (!r && S_ISLNK(statbuf.st_mode)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } +static int g_fakeStdinIsConsole = 0; +static int g_fakeStderrIsConsole = 0; +static int g_fakeStdoutIsConsole = 0; + +int UTIL_isConsole(FILE* file) +{ + int ret; + UTIL_TRACE_CALL("UTIL_isConsole(%d)", fileno(file)); + if (file == stdin && g_fakeStdinIsConsole) + ret = 1; + else if (file == stderr && g_fakeStderrIsConsole) + ret = 1; + else if (file == stdout && g_fakeStdoutIsConsole) + ret = 1; + else + ret = IS_CONSOLE(file); + UTIL_TRACE_RET(ret); + return ret; +} + +void UTIL_fakeStdinIsConsole(void) +{ + g_fakeStdinIsConsole = 1; +} +void UTIL_fakeStdoutIsConsole(void) +{ + g_fakeStdoutIsConsole = 1; +} +void UTIL_fakeStderrIsConsole(void) +{ + g_fakeStderrIsConsole = 1; +} + U64 UTIL_getFileSize(const char* infilename) { stat_t statbuf; - if (!UTIL_stat(infilename, &statbuf)) return UTIL_FILESIZE_UNKNOWN; - return UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_CALL("UTIL_getFileSize(%s)", infilename); + if (!UTIL_stat(infilename, &statbuf)) { + UTIL_TRACE_RET(-1); + return UTIL_FILESIZE_UNKNOWN; + } + { + U64 const size = 
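UTIL_isConsole() and the UTIL_fakeStd*IsConsole() hooks above let tests exercise console-only behavior while stdin/stdout are really pipes. A small sketch (not part of the patch); note the API offers no way to un-fake within a process:

```c
/* Sketch: faking a console for test purposes. */
#include <stdio.h>
#include "util.h"

static void demoConsoleFaking(void)
{
    /* Under a test harness stdout is typically a pipe, so this usually prints 0 */
    printf("before: UTIL_isConsole(stdout) = %d\n", UTIL_isConsole(stdout));

    UTIL_fakeStdoutIsConsole();                 /* pretend stdout is a terminal */
    printf("after:  UTIL_isConsole(stdout) = %d\n", UTIL_isConsole(stdout));   /* now 1 */
}
```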
UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_RET((int)size); + return size; + } } U64 UTIL_getFileSizeStat(const stat_t* statbuf) @@ -369,11 +529,16 @@ U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles) { U64 total = 0; unsigned n; + UTIL_TRACE_CALL("UTIL_getTotalFileSize(%u)", nbFiles); for (n=0; ntableSize; i++) { + if(!strcmp(table->fileNames[i], name)) { + return (int)i; + } + } + return -1; +} + void UTIL_refFilename(FileNamesTable* fnt, const char* filename) { assert(fnt->tableSize < fnt->tableCapacity); @@ -559,7 +734,7 @@ UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2) for( idx2=0 ; (idx2 < table2->tableSize) && table2->fileNames[idx2] && (pos < newTotalTableSize) ; ++idx2, ++newTableIdx) { size_t const curLen = strlen(table2->fileNames[idx2]); memcpy(buf+pos, table2->fileNames[idx2], curLen); - assert(newTableIdx <= newTable->tableSize); + assert(newTableIdx < newTable->tableSize); newTable->fileNames[newTableIdx] = buf+pos; pos += curLen+1; } } @@ -683,8 +858,11 @@ static int UTIL_prepareFileList(const char *dirName, ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; assert(newListSize >= 0); *bufStart = (char*)UTIL_realloc(*bufStart, (size_t)newListSize); - *bufEnd = *bufStart + newListSize; - if (*bufStart == NULL) { free(path); closedir(dir); return 0; } + if (*bufStart != NULL) { + *bufEnd = *bufStart + newListSize; + } else { + free(path); closedir(dir); return 0; + } } if (*bufStart + *pos + pathLength < *bufEnd) { memcpy(*bufStart + *pos, path, pathLength + 1); /* with final \0 */ @@ -870,30 +1048,30 @@ static const char * trimPath(const char *pathname) static char* mallocAndJoin2Dir(const char *dir1, const char *dir2) { - const size_t dir1Size = strlen(dir1); - const size_t dir2Size = strlen(dir2); - char *outDirBuffer, *buffer, trailingChar; - assert(dir1 != NULL && dir2 != NULL); - outDirBuffer = (char *) malloc(dir1Size + dir2Size + 2); - CONTROL(outDirBuffer != NULL); + { const size_t dir1Size = strlen(dir1); + const size_t dir2Size = strlen(dir2); + char *outDirBuffer, *buffer; - memcpy(outDirBuffer, dir1, dir1Size); - outDirBuffer[dir1Size] = '\0'; + outDirBuffer = (char *) malloc(dir1Size + dir2Size + 2); + CONTROL(outDirBuffer != NULL); - if (dir2[0] == '.') - return outDirBuffer; + memcpy(outDirBuffer, dir1, dir1Size); + outDirBuffer[dir1Size] = '\0'; - buffer = outDirBuffer + dir1Size; - trailingChar = *(buffer - 1); - if (trailingChar != PATH_SEP) { - *buffer = PATH_SEP; - buffer++; - } - memcpy(buffer, dir2, dir2Size); - buffer[dir2Size] = '\0'; + if (dir2[0] == '.') + return outDirBuffer; + + buffer = outDirBuffer + dir1Size; + if (dir1Size > 0 && *(buffer - 1) != PATH_SEP) { + *buffer = PATH_SEP; + buffer++; + } + memcpy(buffer, dir2, dir2Size); + buffer[dir2Size] = '\0'; - return outDirBuffer; + return outDirBuffer; + } } /* this function will return NULL if input srcFileName is not valid name for mirrored output path */ @@ -999,7 +1177,7 @@ makeUniqueMirroredDestDirs(char** srcDirNames, unsigned nbFile, const char* outD trimPath(currDirName))) uniqueDirNr++; - /* we need maintain original src dir name instead of trimmed + /* we need to maintain original src dir name instead of trimmed * dir, so we can retrieve the original src dir's mode_t */ uniqueDirNames[uniqueDirNr - 1] = currDirName; } @@ -1378,6 +1556,9 @@ int UTIL_countCores(int logical) int UTIL_countCores(int logical) { + /* suppress unused parameter warning */ + (void)logical; + /* assume 1 */ return 1; } diff --git 
a/programs/util.h b/programs/util.h index add165d57ce..4ec54137dd3 100644 --- a/programs/util.h +++ b/programs/util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -171,10 +171,30 @@ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions); int UTIL_isRegularFile(const char* infilename); int UTIL_isDirectory(const char* infilename); int UTIL_isSameFile(const char* file1, const char* file2); +int UTIL_isSameFileStat(const char* file1, const char* file2, const stat_t* file1Stat, const stat_t* file2Stat); int UTIL_isCompressedFile(const char* infilename, const char *extensionList[]); int UTIL_isLink(const char* infilename); int UTIL_isFIFO(const char* infilename); +/** + * Returns with the given file descriptor is a console. + * Allows faking whether stdin/stdout/stderr is a console + * using UTIL_fake*IsConsole(). + */ +int UTIL_isConsole(FILE* file); + +/** + * Pretends that stdin/stdout/stderr is a console for testing. + */ +void UTIL_fakeStdinIsConsole(void); +void UTIL_fakeStdoutIsConsole(void); +void UTIL_fakeStderrIsConsole(void); + +/** + * Emit traces for functions that read, or modify file metadata. + */ +void UTIL_traceFileStat(void); + #define UTIL_FILESIZE_UNKNOWN ((U64)(-1)) U64 UTIL_getFileSize(const char* infilename); U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles); @@ -248,7 +268,6 @@ UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2); /*! UTIL_expandFNT() : * read names from @fnt, and expand those corresponding to directories * update @fnt, now containing only file names, - * @return : 0 in case of success, 1 if error * note : in case of error, @fnt[0] is NULL */ void UTIL_expandFNT(FileNamesTable** fnt, int followLinks); @@ -269,6 +288,11 @@ UTIL_createFNT_fromROTable(const char** filenames, size_t nbFilenames); */ FileNamesTable* UTIL_allocateFileNamesTable(size_t tableSize); +/*! UTIL_searchFileNamesTable() : + * Searched through entries in FileNamesTable for a specific name. + * @return : index of entry if found or -1 if not found + */ +int UTIL_searchFileNamesTable(FileNamesTable* table, char const* name); /*! UTIL_refFilename() : * Add a reference to read-only name into @fnt table. diff --git a/programs/windres/verrsrc.h b/programs/windres/verrsrc.h index c1b60e90f32..61b1f3ddc7e 100644 --- a/programs/windres/verrsrc.h +++ b/programs/windres/verrsrc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/windres/zstd.rc b/programs/windres/zstd.rc index f5e404730d2..a2118c2df10 100644 --- a/programs/windres/zstd.rc +++ b/programs/windres/zstd.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "zstd.exe" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." 
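Among the util.h additions above, UTIL_searchFileNamesTable() is a simple linear lookup returning an index or -1. A hedged usage sketch follows; it assumes the pre-existing UTIL_createFNT_fromROTable() and UTIL_freeFileNamesTable() helpers from util.h, which are not part of this changeset.

```c
/* Sketch: name lookup in a FileNamesTable. */
#include <stdio.h>
#include "util.h"

static void demoSearch(void)
{
    const char* names[] = { "a.txt", "b.txt", "c.txt" };
    FileNamesTable* const fnt = UTIL_createFNT_fromROTable(names, 3);
    int const idx = UTIL_searchFileNamesTable(fnt, "b.txt");
    printf("b.txt found at index %d\n", idx);   /* expected: 1; -1 if absent */
    UTIL_freeFileNamesTable(fnt);
}
```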
VALUE "OriginalFilename", "zstd.exe" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/programs/zstd.1 b/programs/zstd.1 index c7a19dbacac..edc128fd137 100644 --- a/programs/zstd.1 +++ b/programs/zstd.1 @@ -1,501 +1,381 @@ -. -.TH "ZSTD" "1" "January 2022" "zstd 1.5.2" "User Commands" -. +.TH "ZSTD" "1" "February 2023" "zstd 1.5.4" "User Commands" .SH "NAME" \fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files -. .SH "SYNOPSIS" -\fBzstd\fR [\fIOPTIONS\fR] [\-|\fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR] -. +.TS +allbox; +\fBzstd\fR [\fIOPTIONS\fR] [\- \fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR] +.TE .P \fBzstdmt\fR is equivalent to \fBzstd \-T0\fR -. .P \fBunzstd\fR is equivalent to \fBzstd \-d\fR -. .P \fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR -. .SH "DESCRIPTION" -\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip (1)\fR and \fBxz (1)\fR\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, with fast modes at > 200 MB/s per core, and strong modes nearing lzma compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. -. +\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip\fR(1) and \fBxz\fR(1)\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. .P -\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences : -. -.IP "\(bu" 4 +\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences: +.IP "\[ci]" 4 Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default\. Use \fB\-q\fR to turn them off\. -. -.IP "\(bu" 4 -\fBzstd\fR does not accept input from console, but it properly accepts \fBstdin\fR when it\'s not the console\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\. -. +.IP "\[ci]" 4 +\fBzstd\fR does not accept input from console, though it does accept \fBstdin\fR when it\'s not the console\. +.IP "\[ci]" 4 +\fBzstd\fR does not store the input\'s filename or attributes, only its contents\. .IP "" 0 -. .P -\fBzstd\fR compresses or decompresses each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal : it will display an error message and skip the \fIfile\fR\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. -. +\fBzstd\fR processes each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. 
\fBzstd\fR will refuse to write compressed data to standard output if it is a terminal: it will display an error message and skip the file\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. .P Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name: -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When compressing, the suffix \fB\.zst\fR is appended to the source filename to get the target filename\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When decompressing, the \fB\.zst\fR suffix is removed from the source filename to get the target filename -. .IP "" 0 -. -.SS "Concatenation with \.zst files" -It is possible to concatenate \fB\.zst\fR files as is\. \fBzstd\fR will decompress such files as if they were a single \fB\.zst\fR file\. -. +.SS "Concatenation with \.zst Files" +It is possible to concatenate multiple \fB\.zst\fR files\. \fBzstd\fR will decompress such agglomerated file as if it was a single \fB\.zst\fR file\. .SH "OPTIONS" -. -.SS "Integer suffixes and special values" +.SS "Integer Suffixes and Special Values" In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\. -. .TP \fBKiB\fR -Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\. -. +Multiply the integer by 1,024 (2\e^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\. .TP \fBMiB\fR -Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. -. -.SS "Operation mode" +Multiply the integer by 1,048,576 (2\e^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. +.SS "Operation Mode" If multiple operation mode options are given, the last one takes effect\. -. .TP \fB\-z\fR, \fB\-\-compress\fR Compress\. This is the default operation mode when no operation mode option is specified and no other operation mode is implied from the command name (for example, \fBunzstd\fR implies \fB\-\-decompress\fR)\. -. .TP \fB\-d\fR, \fB\-\-decompress\fR, \fB\-\-uncompress\fR Decompress\. -. .TP \fB\-t\fR, \fB\-\-test\fR -Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout\fR except that the decompressed data is discarded instead of being written to standard output\. No files are created or removed\. -. +Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout > /dev/null\fR, decompressed data is discarded and checksummed for errors\. No files are created or removed\. .TP \fB\-b#\fR -Benchmark file(s) using compression level # -. +Benchmark file(s) using compression level \fI#\fR\. See \fIBENCHMARK\fR below for a description of this operation\. .TP -\fB\-\-train FILEs\fR -Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. -. +\fB\-\-train FILES\fR +Use \fIFILES\fR as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. See \fIDICTIONARY BUILDER\fR below for a description of this operation\. .TP \fB\-l\fR, \fB\-\-list\fR -Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command can be augmented with the \fB\-v\fR modifier\. -. 
-.SS "Operation modifiers" -. -.IP "\(bu" 4 -\fB\-#\fR: \fB#\fR compression level [1\-19] (default: 3) -. -.IP "\(bu" 4 +Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command\'s output can be augmented with the \fB\-v\fR modifier\. +.SS "Operation Modifiers" +.IP "\[ci]" 4 +\fB\-#\fR: selects \fB#\fR compression level [1\-19] (default: 3) +.IP "\[ci]" 4 \fB\-\-ultra\fR: unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-fast[=#]\fR: switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. -. -.IP "\(bu" 4 -\fB\-\-single\-thread\fR: Does not spawn a thread for compression, use a single thread for both I/O and compression\. In this mode, compression is serialized with I/O, which is slightly slower\. (This is different from \fB\-T1\fR, which spawns 1 compression thread in parallel of I/O)\. This mode is the only one available when multithread support is disabled\. Single\-thread mode features lower memory usage\. Final compressed result is slightly different from \fB\-T1\fR\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-single\-thread\fR: Use a single thread for both I/O and compression\. As compression is serialized with I/O, this can be slightly slower\. Single\-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32\-bit systems\. +.IP +Note 1: this mode is the only available one when multithread support is disabled\. +.IP +Note 2: this mode is different from \fB\-T1\fR, which spawns 1 compression thread in parallel with I/O\. Final compressed result is also slightly different from \fB\-T1\fR\. +.IP "\[ci]" 4 \fB\-\-auto\-threads={physical,logical} (default: physical)\fR: When using a default amount of threads via \fB\-T0\fR, choose the default based on the number of detected physical or logical cores\. -. -.IP "\(bu" 4 -\fB\-\-adapt[=min=#,max=#]\fR : \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. -. 
-.IP "\(bu" 4 -\fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if not \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. -. +.IP "\[ci]" 4 +\fB\-\-adapt[=min=#,max=#]\fR: \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MiB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. +.IP +\fINote\fR: at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. +.IP "\[ci]" 4 +\fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. .IP Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-D DICT\fR: use \fBDICT\fR as Dictionary to compress or decompress FILE(s) -. -.IP "\(bu" 4 -\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that windowSize > srcSize\. -. -.IP -Note: cannot use both this and \-D together Note: \fB\-\-long\fR mode will be automatically activated if chainLog < fileLog (fileLog being the windowLog required to cover the whole file)\. You can also manually force it\. Node: for all levels, you can use \-\-patch\-from in \-\-single\-thread mode to improve compression ratio at the cost of speed Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e 4096), and by setting a large \fB\-\-zstd=chainLog=\fR -. -.IP "\(bu" 4 -\fB\-\-rsyncable\fR : \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that \fIwindowSize\fR > \fIsrcSize\fR\. +.IP +Note: cannot use both this and \fB\-D\fR together\. +.IP +Note: \fB\-\-long\fR mode will be automatically activated if \fIchainLog\fR < \fIfileLog\fR (\fIfileLog\fR being the \fIwindowLog\fR required to cover the whole file)\. 
You can also manually force it\. +.IP +Note: for all levels, you can use \fB\-\-patch\-from\fR in \fB\-\-single\-thread\fR mode to improve compression ratio at the cost of speed\. +.IP +Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR\. +.IP "\[ci]" 4 +\fB\-\-rsyncable\fR: \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. +.IP "\[ci]" 4 \fB\-C\fR, \fB\-\-[no\-]check\fR: add integrity check computed from uncompressed data (default: enabled) -. -.IP "\(bu" 4 -\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \-\-content\-size (meaning that the original size will be placed in the header)\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \fB\-\-content\-size\fR (meaning that the original size will be placed in the header)\. +.IP "\[ci]" 4 \fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\. -. -.IP "\(bu" 4 -\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, Zstandard uses 128 MB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (ie\. you can increase or decrease it)\. -. -.IP -This is also used during compression when using with \-\-patch\-from=\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MB)\. -. -.IP -Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GB\. zstd will load training samples up to the memory limit and ignore the rest\. -. -.IP "\(bu" 4 -\fB\-\-stream\-size=#\fR : Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, \fBzstd\fR uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i\.e\. you can increase or decrease it)\. +.IP +This is also used during compression when using with \fB\-\-patch\-from=\fR\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MiB)\. +.IP +Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GiB\. 
zstd will load training samples up to the memory limit and ignore the rest\. +.IP "\[ci]" 4 +\fB\-\-stream\-size=#\fR: Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. +.IP "\[ci]" 4 \fB\-\-size\-hint=#\fR: When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\. -. -.IP "\(bu" 4 -\fB\-o FILE\fR: save result into \fBFILE\fR -. -.IP "\(bu" 4 -\fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. -. -.IP "\(bu" 4 -\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console) -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-o FILE\fR: save result into \fBFILE\fR\. +.IP "\[ci]" 4 +\fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. During decompression and when the output destination is stdout, pass\-through unrecognized formats as\-is\. +.IP "\[ci]" 4 +\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console); keep original files unchanged\. +.IP "\[ci]" 4 \fB\-\-[no\-]sparse\fR: enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\. -. -.IP "\(bu" 4 -\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. If used in combination with \-o, will trigger a confirmation prompt (which can be silenced with \-f), as this is a destructive operation\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-[no\-]pass\-through\fR enable / disable passing through uncompressed files as\-is\. During decompression when pass\-through is enabled, unrecognized formats will be copied as\-is from the input to the output\. By default, pass\-through will occur when the output destination is stdout and the force (\fB\-f\fR) option is set\. +.IP "\[ci]" 4 +\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. This command is silently ignored if output is \fBstdout\fR\. If used in combination with \fB\-o\fR, triggers a confirmation prompt (which can be silenced with \fB\-f\fR), as this is a destructive operation\. +.IP "\[ci]" 4 \fB\-k\fR, \fB\-\-keep\fR: keep source file(s) after successful compression or decompression\. This is the default behavior\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-r\fR: operate recursively on directories\. It selects all files in the named directory and all its subdirectories\. 
This can be useful both to reduce command line typing, and to circumvent shell expansion limitations, when there are a lot of files and naming breaks the maximum size of a command line\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-filelist FILE\fR read a list of files to process as content from \fBFILE\fR\. Format is compatible with \fBls\fR output, with one file per line\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-output\-dir\-flat DIR\fR: resulting files are stored into target \fBDIR\fR directory, instead of same directory as origin file\. Be aware that this command can introduce name collision issues, if multiple files, from different directories, end up having the same name\. Collision resolution ensures first file with a given name will be present in \fBDIR\fR, while in combination with \fB\-f\fR, the last file will be present instead\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-output\-dir\-mirror DIR\fR: similar to \fB\-\-output\-dir\-flat\fR, the output files are stored underneath target \fBDIR\fR directory, but this option will replicate input directory hierarchy into output \fBDIR\fR\. -. .IP If input directory contains "\.\.", the files in this directory will be ignored\. If input directory is an absolute directory (i\.e\. "/var/tmp/abc"), it will be stored into the "output\-dir/var/tmp/abc"\. If there are multiple input files or directories, name collision resolution will follow the same rules as \fB\-\-output\-dir\-flat\fR\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-format=FORMAT\fR: compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR: display help/long help and exit -. -.IP "\(bu" 4 -\fB\-V\fR, \fB\-\-version\fR: display version number and exit\. Advanced : \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-q\fR will only display the version number, suitable for machine reading\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-V\fR, \fB\-\-version\fR: display version number and exit\. Advanced: \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-q\fR will only display the version number, suitable for machine reading\. +.IP "\[ci]" 4 \fB\-v\fR, \fB\-\-verbose\fR: verbose mode, display more information -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-q\fR, \fB\-\-quiet\fR: suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-no\-progress\fR: do not display the progress bar, but keep all other messages\. -. -.IP "\(bu" 4 -\fB\-\-show\-default\-cparams\fR: Shows the default compression parameters that will be used for a particular src file\. If the provided src file is not a regular file (eg\. named pipe), the cli will just output the default parameters\. That is, the parameters that are used when the src size is unknown\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-show\-default\-cparams\fR: shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size\. If the provided file is not a regular file (e\.g\. a pipe), this flag will output the parameters used for inputs of unknown size\. +.IP "\[ci]" 4 \fB\-\-\fR: All arguments after \fB\-\-\fR are treated as files -. 
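As an aside on `--show-default-cparams`: the values it prints come from the same level-to-parameters mapping the library itself exposes. Below is a minimal sketch of querying that mapping directly, assuming the experimental API (`ZSTD_STATIC_LINKING_ONLY`), which provides `ZSTD_getCParams()`; the file name and argument handling are purely illustrative:

    /* show_cparams_sketch.c - print the compression parameters the library
     * derives from a level and an estimated source size, similar in spirit
     * to `zstd --show-default-cparams`. Uses the experimental API. */
    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char** argv)
    {
        int const level = (argc > 1) ? atoi(argv[1]) : 3;   /* CLI default level */
        unsigned long long const srcSize =
            (argc > 2) ? strtoull(argv[2], NULL, 10) : 0;   /* 0 = unknown size */

        ZSTD_compressionParameters const cp = ZSTD_getCParams(level, srcSize, 0 /* no dict */);
        printf("level %d, srcSize %llu => wlog=%u clog=%u hlog=%u slog=%u mml=%u tlen=%u strat=%d\n",
               level, srcSize, cp.windowLog, cp.chainLog, cp.hashLog,
               cp.searchLog, cp.minMatch, cp.targetLength, (int)cp.strategy);
        return 0;
    }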
.IP "" 0 -. -.SS "Restricted usage of Environment Variables" -Using environment variables to set parameters has security implications\. Therefore, this avenue is intentionally restricted\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the compression level and number of threads to use during compression, respectively\. -. +.SS "gzip Operation Modifiers" +When invoked via a \fBgzip\fR symlink, \fBzstd\fR will support further options that intend to mimic the \fBgzip\fR behavior: +.TP +\fB\-n\fR, \fB\-\-no\-name\fR +do not store the original filename and timestamps when compressing a file\. This is the default behavior and hence a no\-op\. +.TP +\fB\-\-best\fR +alias to the option \fB\-9\fR\. +.SS "Environment Variables" +Employing environment variables to set parameters has security implications\. Therefore, this avenue is intentionally limited\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the compression level and number of threads to use during compression, respectively\. .P \fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\. -. .P \fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of (\fB1\fR), and is capped at ZSTDMT_NBWORKERS_MAX==200\. \fBzstd\fR must be compiled with multithread support for this to have any effect\. -. .P They can both be overridden by corresponding command line arguments: \fB\-#\fR for compression level and \fB\-T#\fR for number of compression threads\. -. .SH "DICTIONARY BUILDER" -\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. -. +\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. .TP \fB\-\-train FILEs\fR -Use FILEs as training set to create a dictionary\. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. -. +Use FILEs as training set to create a dictionary\. The training set should ideally contain a lot of samples (> 100), and weight typically 100x the target dictionary size (for example, ~10 MB for a 100 KB dictionary)\. 
\fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. +.IP +Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files\. In the case where some samples happen to be large, only the first 128 KiB of these samples will be used for training\. .IP -\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. -. +\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional advanced parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default \fB\-\-train\fR is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. .TP -\fB\-o file\fR -Dictionary saved into \fBfile\fR (default name: dictionary)\. -. +\fB\-o FILE\fR +Dictionary saved into \fBFILE\fR (default name: dictionary)\. .TP \fB\-\-maxdict=#\fR -Limit dictionary to specified size (default: 112640)\. -. +Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it\'s possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\. .TP \fB\-#\fR Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\. -. .TP \fB\-B#\fR Split input files into blocks of size # (default: no split) -. .TP \fB\-M#\fR, \fB\-\-memory=#\fR -Limit the amount of sample data loaded for training (default: 2 GB)\. See above for details\. -. +Limit the amount of sample data loaded for training (default: 2 GB)\. Note that the default (2 GB) is also the maximum\. This parameter can be useful in situations where the training set size is not well controlled and could be potentially very large\. Since speed of the training process is directly correlated to the size of the training sample set, a smaller sample set leads to faster training\. +.IP +In situations where the training set is larger than maximum memory, the CLI will randomly select samples among the available ones, up to the maximum allowed memory budget\. This is meant to improve dictionary relevance by mitigating the potential impact of clustering, such as selecting only files from the beginning of a list sorted by modification date, or sorted by alphabetical order\. The randomization process is deterministic, so training of the same list of files with the same parameters will lead to the creation of the same dictionary\. .TP \fB\-\-dictID=#\fR -A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to give a precise number instead\. Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. 
However, it\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. -. +A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to provide an explicit number ID instead\. It\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. +.IP +Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\e^31, so they should not be used in public\. .TP \fB\-\-train\-cover[=k#,d=#,steps=#,split=#,shrink[=#]]\fR Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\. -. .IP Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\. -. .IP Examples: -. .IP \fBzstd \-\-train\-cover FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR -. .IP \fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50 FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR -. .IP \fBzstd \-\-train\-cover=shrink FILEs\fR -. .IP \fBzstd \-\-train\-cover=shrink=2 FILEs\fR -. .TP \fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. -. .IP \fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. 
Using a higher \fIf\fR reduces collision but takes longer\. -. .IP Examples: -. .IP \fBzstd \-\-train\-fastcover FILEs\fR -. .IP \fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR -. .TP \fB\-\-train\-legacy[=selectivity=#]\fR -Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its possible maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. -. +Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its achievable maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. .IP Examples: -. .IP \fBzstd \-\-train\-legacy FILEs\fR -. .IP \fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR -. .SH "BENCHMARK" -. .TP \fB\-b#\fR benchmark file(s) using compression level # -. .TP \fB\-e#\fR benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) -. .TP \fB\-i#\fR minimum evaluation time, in seconds (default: 3s), benchmark mode only -. .TP \fB\-B#\fR, \fB\-\-block\-size=#\fR -cut file(s) into independent blocks of size # (default: no block) -. +cut file(s) into independent chunks of size # (default: no chunking) .TP \fB\-\-priority=rt\fR set process priority to real\-time -. .P -\fBOutput Format:\fR CompressionLevel#Filename : IntputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed -. +\fBOutput Format:\fR CompressionLevel#Filename: InputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed .P \fBMethodology:\fR For both compression and decompression speed, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. -. .SH "ADVANCED COMPRESSION OPTIONS" -. -.SS "\-B#:" -Select the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to (slightly) different compressed frames\. -. +### \-B#: Specify the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to non\-identical compressed frames\. .SS "\-\-zstd[=options]:" -\fBzstd\fR provides 22 predefined compression levels\. The selected or default predefined compression level can be changed with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. 
You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: -. +\fBzstd\fR provides 22 predefined regular compression levels plus the fast levels\. This compression level is translated internally into a number of specific parameters that actually control the behavior of the compressor\. (You can see the result of this translation with \fB\-\-show\-default\-cparams\fR\.) These specific parameters can be overridden with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: .TP \fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR Specify a strategy used by a match finder\. -. .IP -There are 9 strategies numbered from 1 to 9, from faster to stronger: 1=ZSTD_fast, 2=ZSTD_dfast, 3=ZSTD_greedy, 4=ZSTD_lazy, 5=ZSTD_lazy2, 6=ZSTD_btlazy2, 7=ZSTD_btopt, 8=ZSTD_btultra, 9=ZSTD_btultra2\. -. +There are 9 strategies numbered from 1 to 9, from fastest to strongest: 1=\fBZSTD_fast\fR, 2=\fBZSTD_dfast\fR, 3=\fBZSTD_greedy\fR, 4=\fBZSTD_lazy\fR, 5=\fBZSTD_lazy2\fR, 6=\fBZSTD_btlazy2\fR, 7=\fBZSTD_btopt\fR, 8=\fBZSTD_btultra\fR, 9=\fBZSTD_btultra2\fR\. .TP \fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR Specify the maximum number of bits for a match distance\. -. .IP The higher number of increases the chance to find a match which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\. -. .IP Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. -. .TP \fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR Specify the maximum number of bits for a hash table\. -. .IP -Bigger hash tables cause less collisions which usually makes compression faster, but requires more memory during compression\. -. +Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression\. .IP -The minimum \fIhlog\fR is 6 (64 B) and the maximum is 30 (1 GiB)\. -. +The minimum \fIhlog\fR is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB)\. .TP \fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR -Specify the maximum number of bits for a hash chain or a binary tree\. -. +Specify the maximum number of bits for the secondary search structure, whose form depends on the selected \fBstrategy\fR\. .IP -Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the ZSTD_fast strategy\. -. +Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the \fBZSTD_fast\fR \fBstrategy\fR, which only has the primary hash table\. .IP -The minimum \fIclog\fR is 6 (64 B) and the maximum is 29 (524 Mib) on 32\-bit platforms and 30 (1 Gib) on 64\-bit platforms\. -. 
+The minimum \fIclog\fR is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32\-bit platforms and 30 (1B entries / 4 GiB) on 64\-bit platforms\. .TP \fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\. -. .IP More searches increases the chance to find a match which usually increases compression ratio but decreases compression speed\. -. .IP The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\. -. .TP \fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR Specify the minimum searched length of a match in a hash table\. -. .IP Larger search lengths usually decrease compression ratio but improve decompression speed\. -. .IP The minimum \fImml\fR is 3 and the maximum is 7\. -. .TP \fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR The impact of this field vary depending on selected strategy\. -. .IP -For ZSTD_btopt, ZSTD_btultra and ZSTD_btultra2, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. t For ZSTD_fast, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed : a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. -. +For \fBZSTD_btopt\fR, \fBZSTD_btultra\fR and \fBZSTD_btultra2\fR, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. +.IP +For \fBZSTD_fast\fR, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed: a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. .IP For all other strategies, this field has no impact\. -. .IP -The minimum \fItlen\fR is 0 and the maximum is 128 Kib\. -. +The minimum \fItlen\fR is 0 and the maximum is 128 KiB\. .TP \fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\. -. .IP -The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default" : \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. -. +The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default": \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. .TP \fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR Specify the maximum size for a hash table used for long distance matching\. -. 
.IP This option is ignored unless long distance matching is enabled\. -. .IP Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\. -. .IP The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: 20)\. -. .TP \fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR Specify the minimum searched length of a match for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger/very small values usually decrease compression ratio\. -. .IP The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 64)\. -. .TP \fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR Specify the size of each bucket for the hash table used for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger bucket sizes improve collision resolution but decrease compression speed\. -. .IP The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 3)\. -. .TP \fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR Specify the frequency of inserting entries into the long distance matching hash table\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. -. .IP The default value is \fBwlog \- lhlog\fR\. -. .SS "Example" The following parameters sets advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: -. .P \fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 -. +.SH "SEE ALSO" +\fBzstdgrep\fR(1), \fBzstdless\fR(1), \fBgzip\fR(1), \fBxz\fR(1) +.P +The \fIzstandard\fR format is specified in Y\. Collet, "Zstandard Compression and the \'application/zstd\' Media Type", https://www\.ietf\.org/rfc/rfc8878\.txt, Internet RFC 8878 (February 2021)\. .SH "BUGS" Report bugs at: https://github\.com/facebook/zstd/issues -. .SH "AUTHOR" Yann Collet diff --git a/programs/zstd.1.md b/programs/zstd.1.md index e343ec0448b..3b7f24f7980 100644 --- a/programs/zstd.1.md +++ b/programs/zstd.1.md @@ -4,7 +4,7 @@ zstd(1) -- zstd, zstdmt, unzstd, zstdcat - Compress or decompress .zst files SYNOPSIS -------- -`zstd` [*OPTIONS*] [-|_INPUT-FILE_] [-o _OUTPUT-FILE_] +`zstd` [] [-|] [-o ] `zstdmt` is equivalent to `zstd -T0` @@ -16,32 +16,32 @@ SYNOPSIS DESCRIPTION ----------- `zstd` is a fast lossless compression algorithm and data compression tool, -with command line syntax similar to `gzip (1)` and `xz (1)`. +with command line syntax similar to `gzip`(1) and `xz`(1). It is based on the **LZ77** family, with further FSE & huff0 entropy stages. `zstd` offers highly configurable compression speed, -with fast modes at > 200 MB/s per core, -and strong modes nearing lzma compression ratios. +from fast modes at > 200 MB/s per core, +to strong modes with excellent compression ratios. It also features a very fast decoder, with speeds > 500 MB/s per core. `zstd` command line syntax is generally similar to gzip, -but features the following differences : +but features the following differences: - Source files are preserved by default. It's possible to remove them automatically by using the `--rm` command. - When compressing a single file, `zstd` displays progress notifications and result summary by default. Use `-q` to turn them off. 
- - `zstd` does not accept input from console, - but it properly accepts `stdin` when it's not the console. - `zstd` displays a short help page when command line is an error. Use `-q` to turn it off. + - `zstd` does not accept input from console, + though it does accept `stdin` when it's not the console. + - `zstd` does not store the input's filename or attributes, only its contents. -`zstd` compresses or decompresses each _file_ according to the selected -operation mode. +`zstd` processes each _file_ according to the selected operation mode. If no _files_ are given or _file_ is `-`, `zstd` reads from standard input and writes the processed data to standard output. `zstd` will refuse to write compressed data to standard output -if it is a terminal : it will display an error message and skip the _file_. +if it is a terminal: it will display an error message and skip the file. Similarly, `zstd` will refuse to read compressed data from standard input if it is a terminal. @@ -53,14 +53,15 @@ whose name is derived from the source _file_ name: * When decompressing, the `.zst` suffix is removed from the source filename to get the target filename -### Concatenation with .zst files -It is possible to concatenate `.zst` files as is. -`zstd` will decompress such files as if they were a single `.zst` file. +### Concatenation with .zst Files +It is possible to concatenate multiple `.zst` files. `zstd` will decompress +such agglomerated file as if it was a single `.zst` file. OPTIONS ------- -### Integer suffixes and special values +### Integer Suffixes and Special Values + In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers. There must be no space between the integer and the suffix. @@ -72,7 +73,8 @@ There must be no space between the integer and the suffix. Multiply the integer by 1,048,576 (2\^20). `Mi`, `M`, and `MB` are accepted as synonyms for `MiB`. -### Operation mode +### Operation Mode + If multiple operation mode options are given, the last one takes effect. @@ -85,23 +87,25 @@ the last one takes effect. Decompress. * `-t`, `--test`: Test the integrity of compressed _files_. - This option is equivalent to `--decompress --stdout` except that the - decompressed data is discarded instead of being written to standard output. + This option is equivalent to `--decompress --stdout > /dev/null`, + decompressed data is discarded and checksummed for errors. No files are created or removed. * `-b#`: - Benchmark file(s) using compression level # -* `--train FILEs`: - Use FILEs as a training set to create a dictionary. + Benchmark file(s) using compression level _#_. + See _BENCHMARK_ below for a description of this operation. +* `--train FILES`: + Use _FILES_ as a training set to create a dictionary. The training set should contain a lot of small files (> 100). + See _DICTIONARY BUILDER_ below for a description of this operation. * `-l`, `--list`: Display information related to a zstd compressed file, such as size, ratio, and checksum. Some of these fields may not be available. - This command can be augmented with the `-v` modifier. + This command's output can be augmented with the `-v` modifier. -### Operation modifiers +### Operation Modifiers * `-#`: - `#` compression level \[1-19] (default: 3) + selects `#` compression level \[1-19\] (default: 3) * `--ultra`: unlocks high compression levels 20+ (maximum 22), using a lot more memory. Note that decompression will also require more memory when using these levels. 
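To relate the level selection above to the library, here is a minimal sketch of one-shot, in-memory compression at an explicit level; the payload is a placeholder, and real callers would size the destination with `ZSTD_compressBound()`:

    /* level_demo.c - one-shot compression at an explicit level,
     * the library-side counterpart of `zstd -#`. */
    #include <zstd.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char src[] = "an example payload, an example payload, an example payload";
        size_t const srcSize = strlen(src);
        char dst[1024];             /* comfortably above ZSTD_compressBound(srcSize) here */

        int const level = 19;       /* 1..19 is the regular range; ZSTD_maxCLevel() is 22 */
        size_t const cSize = ZSTD_compress(dst, sizeof(dst), src, srcSize, level);
        if (ZSTD_isError(cSize)) {
            fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));
            return 1;
        }
        printf("%zu -> %zu bytes at level %d\n", srcSize, cSize, level);
        return 0;
    }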
@@ -119,27 +123,31 @@ the last one takes effect. which is either 64 in 32-bit mode, or 256 for 64-bit environments. This modifier does nothing if `zstd` is compiled without multithread support. * `--single-thread`: - Does not spawn a thread for compression, use a single thread for both I/O and compression. - In this mode, compression is serialized with I/O, which is slightly slower. - (This is different from `-T1`, which spawns 1 compression thread in parallel of I/O). - This mode is the only one available when multithread support is disabled. - Single-thread mode features lower memory usage. - Final compressed result is slightly different from `-T1`. + Use a single thread for both I/O and compression. + As compression is serialized with I/O, this can be slightly slower. + Single-thread mode features significantly lower memory usage, + which can be useful for systems with limited amount of memory, such as 32-bit systems. + + Note 1: this mode is the only available one when multithread support is disabled. + + Note 2: this mode is different from `-T1`, which spawns 1 compression thread in parallel with I/O. + Final compressed result is also slightly different from `-T1`. * `--auto-threads={physical,logical} (default: physical)`: When using a default amount of threads via `-T0`, choose the default based on the number of detected physical or logical cores. -* `--adapt[=min=#,max=#]` : +* `--adapt[=min=#,max=#]`: `zstd` will dynamically adapt compression level to perceived I/O conditions. Compression level adaptation can be observed live by using command `-v`. Adaptation can be constrained between supplied `min` and `max` levels. The feature works when combined with multi-threading and `--long` mode. It does not work with `--single-thread`. - It sets window size to 8 MB by default (can be changed manually, see `wlog`). + It sets window size to 8 MiB by default (can be changed manually, see `wlog`). Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible. - _note_ : at the time of this writing, `--adapt` can remain stuck at low speed + + _Note_: at the time of this writing, `--adapt` can remain stuck at low speed when combined with multiple worker threads (>=2). * `--long[=#]`: - enables long distance matching with `#` `windowLog`, if not `#` is not + enables long distance matching with `#` `windowLog`, if `#` is not present it defaults to `27`. This increases the window size (`windowLog`) and memory usage for both the compressor and decompressor. @@ -153,18 +161,21 @@ the last one takes effect. * `--patch-from FILE`: Specify the file to be used as a reference point for zstd's diff engine. This is effectively dictionary compression with some convenient parameter - selection, namely that windowSize > srcSize. + selection, namely that _windowSize_ > _srcSize_. + + Note: cannot use both this and `-D` together. - Note: cannot use both this and -D together - Note: `--long` mode will be automatically activated if chainLog < fileLog - (fileLog being the windowLog required to cover the whole file). You + Note: `--long` mode will be automatically activated if _chainLog_ < _fileLog_ + (_fileLog_ being the _windowLog_ required to cover the whole file). You can also manually force it. - Node: for all levels, you can use --patch-from in --single-thread mode - to improve compression ratio at the cost of speed + + Note: for all levels, you can use `--patch-from` in `--single-thread` mode + to improve compression ratio at the cost of speed. 
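To make the `--patch-from` notes above more concrete, the underlying mechanism can be approximated with the public API by referencing the old file as a prefix and widening the window. This is a rough sketch under those assumptions, not the CLI's actual implementation; the buffers are tiny placeholders and error checks on the setup calls are omitted for brevity:

    /* patch_sketch.c - approximate the --patch-from mechanism:
     * reference the old version as a prefix (a lightweight dictionary)
     * and raise windowLog so the reference fits in the match window.
     * Decompression would need ZSTD_DCtx_refPrefix() with the same old data. */
    #include <zstd.h>
    #include <stdio.h>

    int main(void)
    {
        const char oldVersion[] = "the quick brown fox jumps over the lazy dog";
        const char newVersion[] = "the quick brown fox jumps over the lazy cat";
        char patch[256];

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 20);  /* >27 needs --long/--memory on decode */
        ZSTD_CCtx_refPrefix(cctx, oldVersion, sizeof(oldVersion));  /* valid for the next frame only */

        size_t const pSize = ZSTD_compress2(cctx, patch, sizeof(patch),
                                            newVersion, sizeof(newVersion));
        if (ZSTD_isError(pSize))
            fprintf(stderr, "error: %s\n", ZSTD_getErrorName(pSize));
        else
            printf("patch frame is %zu bytes for a %zu-byte target\n", pSize, sizeof(newVersion));
        ZSTD_freeCCtx(cctx);
        return 0;
    }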
+ Note: for level 19, you can get increased compression ratio at the cost of speed by specifying `--zstd=targetLength=` to be something large - (i.e 4096), and by setting a large `--zstd=chainLog=` -* `--rsyncable` : + (i.e. 4096), and by setting a large `--zstd=chainLog=`. +* `--rsyncable`: `zstd` will periodically synchronize the compression state to make the compressed file more rsync-friendly. There is a negligible impact to compression ratio, and the faster compression levels will see a small @@ -177,24 +188,24 @@ the last one takes effect. * `--[no-]content-size`: enable / disable whether or not the original size of the file is placed in the header of the compressed file. The default option is - --content-size (meaning that the original size will be placed in the header). + `--content-size` (meaning that the original size will be placed in the header). * `--no-dictID`: do not store dictionary ID within frame header (dictionary compression). The decoder will have to rely on implicit knowledge about which dictionary to use, it won't be able to check if it's correct. * `-M#`, `--memory=#`: - Set a memory usage limit. By default, Zstandard uses 128 MB for decompression + Set a memory usage limit. By default, `zstd` uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can - override this manually if need be in either direction (ie. you can increase or + override this manually if need be in either direction (i.e. you can increase or decrease it). - This is also used during compression when using with --patch-from=. In this case, - this parameter overrides that maximum size allowed for a dictionary. (128 MB). + This is also used during compression when using with `--patch-from=`. In this case, + this parameter overrides that maximum size allowed for a dictionary. (128 MiB). Additionally, this can be used to limit memory for dictionary training. This parameter - overrides the default limit of 2 GB. zstd will load training samples up to the memory limit + overrides the default limit of 2 GiB. zstd will load training samples up to the memory limit and ignore the rest. -* `--stream-size=#` : +* `--stream-size=#`: Sets the pledged source size of input coming from a stream. This value must be exact, as it will be included in the produced frame header. Incorrect stream sizes will cause an error. This information will be used to better optimize compression parameters, resulting in @@ -207,12 +218,14 @@ the last one takes effect. Exact guesses result in better compression ratios. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation. * `-o FILE`: - save result into `FILE` + save result into `FILE`. * `-f`, `--force`: disable input and output checks. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc. + During decompression and when the output destination is stdout, pass-through + unrecognized formats as-is. * `-c`, `--stdout`: - write to standard output (even if it is the console) + write to standard output (even if it is the console); keep original files unchanged. * `--[no-]sparse`: enable / disable sparse FS support, to make files with many zeroes smaller on disk. @@ -221,9 +234,17 @@ the last one takes effect. default: enabled when output is into a file, and disabled when output is stdout. This setting overrides default and can force sparse mode over stdout. 
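The frame-related modifiers above (`--check`, `--content-size`, `--no-dictID`, `--stream-size`) correspond to advanced parameters on a compression context. A short sketch of setting them follows; the payload is a placeholder:

    /* frame_flags_sketch.c - library-side switches behind
     * --check / --content-size / --no-dictID / --stream-size. */
    #include <zstd.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char src[] = "some streamed payload";
        char dst[256];

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);     /* --check: add XXH64 checksum */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 1);  /* --content-size (the default) */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, 0);       /* --no-dictID */
        /* --stream-size=#: pledge the exact source size so it can be stored
         * in the frame header even when input arrives as a stream. Must be exact. */
        ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)strlen(src));

        size_t const cSize = ZSTD_compress2(cctx, dst, sizeof(dst), src, strlen(src));
        if (ZSTD_isError(cSize))
            fprintf(stderr, "error: %s\n", ZSTD_getErrorName(cSize));
        else
            printf("compressed to %zu bytes\n", cSize);
        ZSTD_freeCCtx(cctx);
        return 0;
    }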
+* `--[no-]pass-through` + enable / disable passing through uncompressed files as-is. During + decompression when pass-through is enabled, unrecognized formats will be + copied as-is from the input to the output. By default, pass-through will + occur when the output destination is stdout and the force (`-f`) option is + set. * `--rm`: - remove source file(s) after successful compression or decompression. If used in combination with - -o, will trigger a confirmation prompt (which can be silenced with -f), as this is a destructive operation. + remove source file(s) after successful compression or decompression. + This command is silently ignored if output is `stdout`. + If used in combination with `-o`, + triggers a confirmation prompt (which can be silenced with `-f`), as this is a destructive operation. * `-k`, `--keep`: keep source file(s) after successful compression or decompression. This is the default behavior. @@ -262,7 +283,7 @@ the last one takes effect. display help/long help and exit * `-V`, `--version`: display version number and exit. - Advanced : `-vV` also displays supported formats. + Advanced: `-vV` also displays supported formats. `-vvV` also displays POSIX support. `-q` will only display the version number, suitable for machine reading. * `-v`, `--verbose`: @@ -273,17 +294,27 @@ the last one takes effect. * `--no-progress`: do not display the progress bar, but keep all other messages. * `--show-default-cparams`: - Shows the default compression parameters that will be used for a - particular src file. If the provided src file is not a regular file - (eg. named pipe), the cli will just output the default parameters. - That is, the parameters that are used when the src size is unknown. + shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size. + If the provided file is not a regular file (e.g. a pipe), this flag will output the parameters used for inputs of unknown size. * `--`: All arguments after `--` are treated as files -### Restricted usage of Environment Variables -Using environment variables to set parameters has security implications. -Therefore, this avenue is intentionally restricted. +### gzip Operation Modifiers +When invoked via a `gzip` symlink, `zstd` will support further +options that intend to mimic the `gzip` behavior: + +* `-n`, `--no-name`: + do not store the original filename and timestamps when compressing + a file. This is the default behavior and hence a no-op. +* `--best`: + alias to the option `-9`. + + +### Environment Variables + +Employing environment variables to set parameters has security implications. +Therefore, this avenue is intentionally limited. Only `ZSTD_CLEVEL` and `ZSTD_NBTHREADS` are currently supported. They set the compression level and number of threads to use during compression, respectively. @@ -293,8 +324,8 @@ If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a `ZSTD_NBTHREADS` can be used to set the number of threads `zstd` will attempt to use during compression. If the value of `ZSTD_NBTHREADS` is not a valid unsigned integer, it will be ignored with a warning message. -`ZSTD_NBTHREADS` has a default value of (`1`), and is capped at ZSTDMT_NBWORKERS_MAX==200. `zstd` must be -compiled with multithread support for this to have any effect. +`ZSTD_NBTHREADS` has a default value of (`1`), and is capped at ZSTDMT_NBWORKERS_MAX==200. +`zstd` must be compiled with multithread support for this to have any effect. 
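As an illustration of the documented `ZSTD_CLEVEL` handling (a valid integer replaces the default level 3, anything else is ignored with a warning), here is a hedged sketch; it is not the CLI's actual parsing code:

    /* env_clevel_sketch.c - sketch of the documented ZSTD_CLEVEL behavior. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static int compressionLevelFromEnv(void)
    {
        int level = 3;                                  /* documented default */
        const char* const env = getenv("ZSTD_CLEVEL");
        if (env != NULL) {
            char* end = NULL;
            errno = 0;
            long const v = strtol(env, &end, 10);
            if (errno == 0 && end != env && *end == '\0' && v >= 1 && v <= 19)
                level = (int)v;                         /* accept the "normal" range */
            else
                fprintf(stderr, "warning: ignoring invalid ZSTD_CLEVEL=%s\n", env);
        }
        return level;
    }

    int main(void)
    {
        printf("compression level: %d\n", compressionLevelFromEnv());
        return 0;
    }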
They can both be overridden by corresponding command line arguments: `-#` for compression level and `-T#` for number of compression threads. @@ -306,27 +337,36 @@ DICTIONARY BUILDER which greatly improves efficiency on small files and messages. It's possible to train `zstd` with a set of samples, the result of which is saved into a file called a `dictionary`. -Then during compression and decompression, reference the same dictionary, +Then, during compression and decompression, reference the same dictionary, using command `-D dictionaryFileName`. Compression of small files similar to the sample set will be greatly improved. * `--train FILEs`: Use FILEs as training set to create a dictionary. - The training set should contain a lot of small files (> 100), + The training set should ideally contain a lot of samples (> 100), and weight typically 100x the target dictionary size - (for example, 10 MB for a 100 KB dictionary). + (for example, ~10 MB for a 100 KB dictionary). `--train` can be combined with `-r` to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits. + Since dictionary compression is mostly effective for small files, + the expectation is that the training set will only contain small files. + In the case where some samples happen to be large, + only the first 128 KiB of these samples will be used for training. + `--train` supports multithreading if `zstd` is compiled with threading support (default). - Additional parameters can be specified with `--train-fastcover`. + Additional advanced parameters can be specified with `--train-fastcover`. The legacy dictionary builder can be accessed with `--train-legacy`. The slower cover dictionary builder can be accessed with `--train-cover`. - Default is equivalent to `--train-fastcover=d=8,steps=4`. -* `-o file`: - Dictionary saved into `file` (default name: dictionary). + Default `--train` is equivalent to `--train-fastcover=d=8,steps=4`. + +* `-o FILE`: + Dictionary saved into `FILE` (default name: dictionary). * `--maxdict=#`: - Limit dictionary to specified size (default: 112640). + Limit dictionary to specified size (default: 112640 bytes). + As usual, quantities are expressed in bytes by default, + and it's possible to employ suffixes (like `KB` or `MB`) + to specify larger values. * `-#`: Use `#` compression level during training (optional). Will generate statistics more tuned for selected compression level, @@ -334,17 +374,39 @@ Compression of small files similar to the sample set will be greatly improved. * `-B#`: Split input files into blocks of size # (default: no split) * `-M#`, `--memory=#`: - Limit the amount of sample data loaded for training (default: 2 GB). See above for details. + Limit the amount of sample data loaded for training (default: 2 GB). + Note that the default (2 GB) is also the maximum. + This parameter can be useful in situations where the training set size + is not well controlled and could be potentially very large. + Since speed of the training process is directly correlated to + the size of the training sample set, + a smaller sample set leads to faster training. + + In situations where the training set is larger than maximum memory, + the CLI will randomly select samples among the available ones, + up to the maximum allowed memory budget. 
+ This is meant to improve dictionary relevance + by mitigating the potential impact of clustering, + such as selecting only files from the beginning of a list + sorted by modification date, or sorted by alphabetical order. + The randomization process is deterministic, so + training of the same list of files with the same parameters + will lead to the creation of the same dictionary. + * `--dictID=#`: - A dictionary ID is a locally unique ID - that a decoder can use to verify it is using the right dictionary. + A dictionary ID is a locally unique ID. + The decoder will use this value to verify it is using the right dictionary. By default, zstd will create a 4-bytes random number ID. - It's possible to give a precise number instead. - Short numbers have an advantage : an ID < 256 will only need 1 byte in the - compressed frame header, and an ID < 65536 will only need 2 bytes. - This compares favorably to 4 bytes default. - However, it's up to the dictionary manager to not assign twice the same ID to + It's possible to provide an explicit number ID instead. + It's up to the dictionary manager to not assign twice the same ID to 2 different dictionaries. + Note that short numbers have an advantage: + an ID < 256 will only need 1 byte in the compressed frame header, + and an ID < 65536 will only need 2 bytes. + This compares favorably to 4 bytes default. + + Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\^31, so they should not be used in public. + * `--train-cover[=k#,d=#,steps=#,split=#,shrink[=#]]`: Select parameters for the default dictionary builder algorithm named cover. If _d_ is not specified, then it tries _d_ = 6 and _d_ = 8. @@ -409,7 +471,7 @@ Compression of small files similar to the sample set will be greatly improved. Use legacy dictionary builder algorithm with the given dictionary _selectivity_ (default: 9). The smaller the _selectivity_ value, the denser the dictionary, - improving its efficiency but reducing its possible maximum size. + improving its efficiency but reducing its achievable maximum size. `--train-legacy=s=#` is also accepted. Examples: @@ -429,30 +491,31 @@ BENCHMARK * `-i#`: minimum evaluation time, in seconds (default: 3s), benchmark mode only * `-B#`, `--block-size=#`: - cut file(s) into independent blocks of size # (default: no block) + cut file(s) into independent chunks of size # (default: no chunking) * `--priority=rt`: set process priority to real-time -**Output Format:** CompressionLevel#Filename : IntputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +**Output Format:** CompressionLevel#Filename: InputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed **Methodology:** For both compression and decompression speed, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy. ADVANCED COMPRESSION OPTIONS ---------------------------- ### -B#: -Select the size of each compression job. +Specify the size of each compression job. This parameter is only available when multi-threading is enabled. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads. Default job size varies depending on compression level (generally `4 * windowSize`). `-B#` makes it possible to manually select a custom size. Note that job size must respect a minimum value which is enforced transparently. 
This minimum is either 512 KB, or `overlapSize`, whichever is largest. -Different job sizes will lead to (slightly) different compressed frames. +Different job sizes will lead to non-identical compressed frames. ### --zstd[=options]: -`zstd` provides 22 predefined compression levels. -The selected or default predefined compression level can be changed with -advanced compression options. +`zstd` provides 22 predefined regular compression levels plus the fast levels. +This compression level is translated internally into a number of specific parameters that actually control the behavior of the compressor. +(You can see the result of this translation with `--show-default-cparams`.) +These specific parameters can be overridden with advanced compression options. The _options_ are provided as a comma-separated list. You may specify only the options you want to change and the rest will be taken from the selected or default compression level. @@ -461,10 +524,10 @@ The list of available _options_: - `strategy`=_strat_, `strat`=_strat_: Specify a strategy used by a match finder. - There are 9 strategies numbered from 1 to 9, from faster to stronger: - 1=ZSTD\_fast, 2=ZSTD\_dfast, 3=ZSTD\_greedy, - 4=ZSTD\_lazy, 5=ZSTD\_lazy2, 6=ZSTD\_btlazy2, - 7=ZSTD\_btopt, 8=ZSTD\_btultra, 9=ZSTD\_btultra2. + There are 9 strategies numbered from 1 to 9, from fastest to strongest: + 1=`ZSTD_fast`, 2=`ZSTD_dfast`, 3=`ZSTD_greedy`, + 4=`ZSTD_lazy`, 5=`ZSTD_lazy2`, 6=`ZSTD_btlazy2`, + 7=`ZSTD_btopt`, 8=`ZSTD_btultra`, 9=`ZSTD_btultra2`. - `windowLog`=_wlog_, `wlog`=_wlog_: Specify the maximum number of bits for a match distance. @@ -481,22 +544,23 @@ The list of available _options_: - `hashLog`=_hlog_, `hlog`=_hlog_: Specify the maximum number of bits for a hash table. - Bigger hash tables cause less collisions which usually makes compression + Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression. - The minimum _hlog_ is 6 (64 B) and the maximum is 30 (1 GiB). + The minimum _hlog_ is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB). - `chainLog`=_clog_, `clog`=_clog_: - Specify the maximum number of bits for a hash chain or a binary tree. + Specify the maximum number of bits for the secondary search structure, + whose form depends on the selected `strategy`. Higher numbers of bits increases the chance to find a match which usually improves compression ratio. It also slows down compression speed and increases memory requirements for compression. - This option is ignored for the ZSTD_fast strategy. + This option is ignored for the `ZSTD_fast` `strategy`, which only has the primary hash table. - The minimum _clog_ is 6 (64 B) and the maximum is 29 (524 Mib) on 32-bit platforms - and 30 (1 Gib) on 64-bit platforms. + The minimum _clog_ is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32-bit platforms + and 30 (1B entries / 4 GiB) on 64-bit platforms. - `searchLog`=_slog_, `slog`=_slog_: Specify the maximum number of searches in a hash chain or a binary tree @@ -518,19 +582,19 @@ The list of available _options_: - `targetLength`=_tlen_, `tlen`=_tlen_: The impact of this field vary depending on selected strategy. - For ZSTD\_btopt, ZSTD\_btultra and ZSTD\_btultra2, it specifies + For `ZSTD_btopt`, `ZSTD_btultra` and `ZSTD_btultra2`, it specifies the minimum match length that causes match finder to stop searching. A larger `targetLength` usually improves compression ratio but decreases compression speed. 
-t - For ZSTD\_fast, it triggers ultra-fast mode when > 0. + + For `ZSTD_fast`, it triggers ultra-fast mode when > 0. The value represents the amount of data skipped between match sampling. - Impact is reversed : a larger `targetLength` increases compression speed + Impact is reversed: a larger `targetLength` increases compression speed but decreases compression ratio. For all other strategies, this field has no impact. - The minimum _tlen_ is 0 and the maximum is 128 Kib. + The minimum _tlen_ is 0 and the maximum is 128 KiB. - `overlapLog`=_ovlog_, `ovlog`=_ovlog_: Determine `overlapSize`, amount of data reloaded from previous job. @@ -542,7 +606,7 @@ t 9 means "full overlap", meaning up to `windowSize` is reloaded from previous job. Reducing _ovlog_ by 1 reduces the reloaded amount by a factor 2. For example, 8 means "windowSize/2", and 6 means "windowSize/8". - Value 0 is special and means "default" : _ovlog_ is automatically determined by `zstd`. + Value 0 is special and means "default": _ovlog_ is automatically determined by `zstd`. In which case, _ovlog_ will range from 6 to 9, depending on selected _strat_. - `ldmHashLog`=_lhlog_, `lhlog`=_lhlog_: @@ -592,6 +656,11 @@ similar to predefined level 19 for files bigger than 256 KB: `--zstd`=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 +SEE ALSO +-------- +`zstdgrep`(1), `zstdless`(1), `gzip`(1), `xz`(1) + +The format is specified in Y. Collet, "Zstandard Compression and the 'application/zstd' Media Type", https://www.ietf.org/rfc/rfc8878.txt, Internet RFC 8878 (February 2021). BUGS ---- diff --git a/programs/zstdcli.c b/programs/zstdcli.c index bfe18c0c1ba..93f75e21d9d 100644 --- a/programs/zstdcli.c +++ b/programs/zstdcli.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -27,8 +27,8 @@ /*-************************************ * Dependencies **************************************/ -#include "platform.h" /* IS_CONSOLE, PLATFORM_POSIX_VERSION */ -#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList */ +#include "platform.h" /* PLATFORM_POSIX_VERSION */ +#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList, UTIL_isConsole */ #include /* getenv */ #include /* strcmp, strlen */ #include /* fprintf(), stdin, stdout, stderr */ @@ -46,17 +46,18 @@ # include "zstdcli_trace.h" #endif #include "../lib/zstd.h" /* ZSTD_VERSION_STRING, ZSTD_minCLevel, ZSTD_maxCLevel */ +#include "fileio_asyncio.h" /*-************************************ * Constants **************************************/ -#define COMPRESSOR_NAME "zstd command line interface" +#define COMPRESSOR_NAME "Zstandard CLI" #ifndef ZSTD_VERSION # define ZSTD_VERSION "v" ZSTD_VERSION_STRING #endif #define AUTHOR "Yann Collet" -#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR +#define WELCOME_MESSAGE "*** %s (%i-bit) %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR #define ZSTD_ZSTDMT "zstdmt" #define ZSTD_UNZSTD "unzstd" @@ -124,6 +125,15 @@ static void checkLibVersion(void) } +/*! 
exeNameMatch() :
+ @return : a non-zero value if exeName matches test, excluding the extension
+ */
+static int exeNameMatch(const char* exeName, const char* test)
+{
+    return !strncmp(exeName, test, strlen(test)) &&
+           (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.');
+}
+
 /*-************************************
 * Command Line
 **************************************/
@@ -133,143 +143,174 @@ static void checkLibVersion(void)
  */
 static void usage(FILE* f, const char* programName)
 {
-    DISPLAY_F(f, "Usage : \n");
-    DISPLAY_F(f, " %s [args] [FILE(s)] [-o file] \n", programName);
+    DISPLAY_F(f, "Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided.\n\n");
+    DISPLAY_F(f, "Usage: %s [OPTIONS...] [INPUT... | -] [-o OUTPUT]\n\n", programName);
+    DISPLAY_F(f, "Options:\n");
+    DISPLAY_F(f, " -o OUTPUT Write output to a single file, OUTPUT.\n");
+    DISPLAY_F(f, " -k, --keep Preserve INPUT file(s). [Default] \n");
+    DISPLAY_F(f, " --rm Remove INPUT file(s) after successful (de)compression.\n");
+#ifdef ZSTD_GZCOMPRESS
+    if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */
+        DISPLAY_F(f, " -n, --no-name Do not store original filename when compressing.\n\n");
+    }
+#endif
     DISPLAY_F(f, "\n");
-    DISPLAY_F(f, "FILE : a filename \n");
-    DISPLAY_F(f, " with no FILE, or when FILE is - , read standard input\n");
-    DISPLAY_F(f, "Arguments : \n");
 #ifndef ZSTD_NOCOMPRESS
-    DISPLAY_F(f, " -# : # compression level (1-%d, default: %d) \n", ZSTDCLI_CLEVEL_MAX, ZSTDCLI_CLEVEL_DEFAULT);
+    DISPLAY_F(f, " -# Desired compression level, where `#` is a number between 1 and %d;\n", ZSTDCLI_CLEVEL_MAX);
+    DISPLAY_F(f, " lower numbers provide faster compression, higher numbers yield\n");
+    DISPLAY_F(f, " better compression ratios. [Default: %d]\n\n", ZSTDCLI_CLEVEL_DEFAULT);
 #endif
 #ifndef ZSTD_NODECOMPRESS
-    DISPLAY_F(f, " -d : decompression \n");
+    DISPLAY_F(f, " -d, --decompress Perform decompression.\n");
 #endif
-    DISPLAY_F(f, " -D DICT: use DICT as Dictionary for compression or decompression \n");
-    DISPLAY_F(f, " -o file: result stored into `file` (only 1 output file) \n");
-    DISPLAY_F(f, " -f : disable input and output checks. Allows overwriting existing files,\n");
-    DISPLAY_F(f, " input from console, output to stdout, operating on links,\n");
-    DISPLAY_F(f, " block devices, etc.\n");
-    DISPLAY_F(f, "--rm : remove source file(s) after successful de/compression \n");
-    DISPLAY_F(f, " -k : preserve source file(s) (default) \n");
-    DISPLAY_F(f, " -h/-H : display help/long help and exit \n");
+    DISPLAY_F(f, " -D DICT Use DICT as the dictionary for compression or decompression.\n\n");
+    DISPLAY_F(f, " -f, --force Disable input and output checks. Allows overwriting existing files,\n");
+    DISPLAY_F(f, " receiving input from the console, printing output to STDOUT, and\n");
+    DISPLAY_F(f, " operating on links, block devices, etc. Unrecognized formats will be\n");
+    DISPLAY_F(f, " passed through as-is.\n\n");
+
+    DISPLAY_F(f, " -h Display short usage and exit.\n");
+    DISPLAY_F(f, " -H, --help Display full help and exit.\n");
+    DISPLAY_F(f, " -V, --version Display the program version and exit.\n");
+    DISPLAY_F(f, "\n");
 }

 static void usage_advanced(const char* programName)
 {
     DISPLAYOUT(WELCOME_MESSAGE);
+    DISPLAYOUT("\n");
     usage(stdout, programName);
-    DISPLAYOUT( "\n");
-    DISPLAYOUT( "Advanced arguments : \n");
-    DISPLAYOUT( " -V : display Version number and exit \n");
-
-    DISPLAYOUT( " -c : write to standard output (even if it is the console) \n");
+    DISPLAYOUT("Advanced options:\n");
+    DISPLAYOUT(" -c, --stdout Write to STDOUT (even if it is a console) and keep the INPUT file(s).\n\n");
-    DISPLAYOUT( " -v : verbose mode; specify multiple times to increase verbosity \n");
-    DISPLAYOUT( " -q : suppress warnings; specify twice to suppress errors too \n");
-    DISPLAYOUT( "--[no-]progress : forcibly display, or never display the progress counter.\n");
-    DISPLAYOUT( " note: any (de)compressed output to terminal will mix with progress counter text. \n");
+    DISPLAYOUT(" -v, --verbose Enable verbose output; pass multiple times to increase verbosity.\n");
+    DISPLAYOUT(" -q, --quiet Suppress warnings; pass twice to suppress errors.\n");
+#ifndef ZSTD_NOTRACE
+    DISPLAYOUT(" --trace LOG Log tracing information to LOG.\n");
+#endif
+    DISPLAYOUT("\n");
+    DISPLAYOUT(" --[no-]progress Forcibly show/hide the progress counter. NOTE: Any (de)compressed\n");
+    DISPLAYOUT(" output to terminal will mix with progress counter text.\n\n");
 #ifdef UTIL_HAS_CREATEFILELIST
-    DISPLAYOUT( " -r : operate recursively on directories \n");
-    DISPLAYOUT( "--filelist FILE : read list of files to operate upon from FILE \n");
-    DISPLAYOUT( "--output-dir-flat DIR : processed files are stored into DIR \n");
+    DISPLAYOUT(" -r Operate recursively on directories.\n");
+    DISPLAYOUT(" --filelist LIST Read a list of files to operate on from LIST.\n");
+    DISPLAYOUT(" --output-dir-flat DIR Store processed files in DIR.\n");
 #endif
 #ifdef UTIL_HAS_MIRRORFILELIST
-    DISPLAYOUT( "--output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure \n");
+    DISPLAYOUT(" --output-dir-mirror DIR Store processed files in DIR, respecting original directory structure.\n");
 #endif
+    if (AIO_supported())
+        DISPLAYOUT(" --[no-]asyncio Use asynchronous IO. [Default: Enabled]\n");
-
+    DISPLAYOUT("\n");
 #ifndef ZSTD_NOCOMPRESS
-    DISPLAYOUT( "--[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled)");
+    DISPLAYOUT(" --[no-]check Add XXH64 integrity checksums during compression. [Default: Add, Validate]\n");
 #ifndef ZSTD_NODECOMPRESS
-    DISPLAYOUT( ". If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate).");
+    DISPLAYOUT(" If `-d` is present, ignore/validate checksums during decompression.\n");
 #endif
 #else
 #ifdef ZSTD_NOCOMPRESS
-    DISPLAYOUT( "--[no-]check : during decompression, ignore/validate checksums in compressed frame (default: validate).");
+    DISPLAYOUT(" --[no-]check Ignore/validate checksums during decompression. [Default: Validate]");
 #endif
 #endif /* ZSTD_NOCOMPRESS */
-#ifndef ZSTD_NOTRACE
-    DISPLAYOUT( "\n");
-    DISPLAYOUT( "--trace FILE : log tracing information to FILE.");
-#endif
-    DISPLAYOUT( "\n");
-
-    DISPLAYOUT( "-- : All arguments after \"--\" are treated as files \n");
+    DISPLAYOUT("\n");
+    DISPLAYOUT(" -- Treat remaining arguments after `--` as files.\n");
 #ifndef ZSTD_NOCOMPRESS
-    DISPLAYOUT( "\n");
-    DISPLAYOUT( "Advanced compression arguments : \n");
-    DISPLAYOUT( "--ultra : enable levels beyond %i, up to %i (requires more memory) \n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel());
-    DISPLAYOUT( "--long[=#]: enable long distance matching with given window log (default: %u) \n", g_defaultMaxWindowLog);
-    DISPLAYOUT( "--fast[=#]: switch to very fast compression levels (default: %u) \n", 1);
-    DISPLAYOUT( "--adapt : dynamically adapt compression level to I/O conditions \n");
-    DISPLAYOUT( "--[no-]row-match-finder : force enable/disable usage of fast row-based matchfinder for greedy, lazy, and lazy2 strategies \n");
-    DISPLAYOUT( "--patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine. \n");
+    DISPLAYOUT("\n");
+    DISPLAYOUT("Advanced compression options:\n");
+    DISPLAYOUT(" --ultra Enable levels beyond %i, up to %i; requires more memory.\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel());
+    DISPLAYOUT(" --fast[=#] Switch to very fast compression levels. [Default: %u]\n", 1);
+#ifdef ZSTD_GZCOMPRESS
+    if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */
+        DISPLAYOUT(" --best Compatibility alias for `-9`.\n");
+    }
+#endif
+    DISPLAYOUT(" --adapt Dynamically adapt compression level to I/O conditions.\n");
+    DISPLAYOUT(" --long[=#] Enable long distance matching with window log #. [Default: %u]\n", g_defaultMaxWindowLog);
+    DISPLAYOUT(" --patch-from=REF Use REF as the reference point for Zstandard's diff engine. \n\n");
 # ifdef ZSTD_MULTITHREAD
-    DISPLAYOUT( " -T# : spawns # compression threads (default: 1, 0==# cores) \n");
-    DISPLAYOUT( " -B# : select size of each job (default: 0==automatic) \n");
-    DISPLAYOUT( "--single-thread : use a single thread for both I/O and compression (result slightly different than -T1) \n");
-    DISPLAYOUT( "--auto-threads={physical,logical} (default: physical} : use either physical cores or logical cores as default when specifying -T0 \n");
-    DISPLAYOUT( "--rsyncable : compress using a rsync-friendly method (-B sets block size) \n");
+    DISPLAYOUT(" -T# Spawn # compression threads. [Default: 1; pass 0 for core count.]\n");
+    DISPLAYOUT(" --single-thread Share a single thread for I/O and compression (slightly different than `-T1`).\n");
+    DISPLAYOUT(" --auto-threads={physical|logical}\n");
+    DISPLAYOUT(" Use physical/logical cores when using `-T0`. [Default: Physical]\n\n");
+    DISPLAYOUT(" -B# Set job size to #. [Default: 0 (automatic)]\n");
+    DISPLAYOUT(" --rsyncable Compress using a rsync-friendly method (`-B` sets block size).
\n"); + DISPLAYOUT("\n"); # endif - DISPLAYOUT( "--exclude-compressed: only compress files that are not already compressed \n"); - DISPLAYOUT( "--stream-size=# : specify size of streaming input from `stdin` \n"); - DISPLAYOUT( "--size-hint=# optimize compression parameters for streaming input of approximately this size \n"); - DISPLAYOUT( "--target-compressed-block-size=# : generate compressed block of approximately targeted size \n"); - DISPLAYOUT( "--no-dictID : don't write dictID into header (dictionary compression only) \n"); - DISPLAYOUT( "--[no-]compress-literals : force (un)compressed literals \n"); - - DISPLAYOUT( "--format=zstd : compress files to the .zst format (default) \n"); + DISPLAYOUT(" --exclude-compressed Only compress files that are not already compressed.\n\n"); + + DISPLAYOUT(" --stream-size=# Specify size of streaming input from STDIN.\n"); + DISPLAYOUT(" --size-hint=# Optimize compression parameters for streaming input of approximately size #.\n"); + DISPLAYOUT(" --target-compressed-block-size=#\n"); + DISPLAYOUT(" Generate compressed blocks of approximately # size.\n\n"); + DISPLAYOUT(" --no-dictID Don't write `dictID` into the header (dictionary compression only).\n"); + DISPLAYOUT(" --[no-]compress-literals Force (un)compressed literals.\n"); + DISPLAYOUT(" --[no-]row-match-finder Explicitly enable/disable the fast, row-based matchfinder for\n"); + DISPLAYOUT(" the 'greedy', 'lazy', and 'lazy2' strategies.\n"); + + DISPLAYOUT("\n"); + DISPLAYOUT(" --format=zstd Compress files to the `.zst` format. [Default]\n"); #ifdef ZSTD_GZCOMPRESS - DISPLAYOUT( "--format=gzip : compress files to the .gz format \n"); + DISPLAYOUT(" --format=gzip Compress files to the `.gz` format.\n"); #endif #ifdef ZSTD_LZMACOMPRESS - DISPLAYOUT( "--format=xz : compress files to the .xz format \n"); - DISPLAYOUT( "--format=lzma : compress files to the .lzma format \n"); + DISPLAYOUT(" --format=xz Compress files to the `.xz` format.\n"); + DISPLAYOUT(" --format=lzma Compress files to the `.lzma` format.\n"); #endif #ifdef ZSTD_LZ4COMPRESS - DISPLAYOUT( "--format=lz4 : compress files to the .lz4 format \n"); + DISPLAYOUT( " --format=lz4 Compress files to the `.lz4` format.\n"); #endif #endif /* !ZSTD_NOCOMPRESS */ #ifndef ZSTD_NODECOMPRESS - DISPLAYOUT( "\n"); - DISPLAYOUT( "Advanced decompression arguments : \n"); - DISPLAYOUT( " -l : print information about zstd compressed files \n"); - DISPLAYOUT( "--test : test compressed file integrity \n"); - DISPLAYOUT( " -M# : Set a memory usage limit for decompression \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Advanced decompression options:\n"); + DISPLAYOUT(" -l Print information about Zstandard-compressed files.\n"); + DISPLAYOUT(" --test Test compressed file integrity.\n"); + DISPLAYOUT(" -M# Set the memory usage limit to # megabytes.\n"); # if ZSTD_SPARSE_DEFAULT - DISPLAYOUT( "--[no-]sparse : sparse mode (default: enabled on file, disabled on stdout) \n"); + DISPLAYOUT(" --[no-]sparse Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.]\n"); # else - DISPLAYOUT( "--[no-]sparse : sparse mode (default: disabled) \n"); + DISPLAYOUT(" --[no-]sparse Enable sparse mode. [Default: Disabled]\n"); # endif + { + char const* passThroughDefault = "Disabled"; + if (exeNameMatch(programName, ZSTD_CAT) || + exeNameMatch(programName, ZSTD_ZCAT) || + exeNameMatch(programName, ZSTD_GZCAT)) { + passThroughDefault = "Enabled"; + } + DISPLAYOUT(" --[no-]pass-through Pass through uncompressed files as-is. 
[Default: %s]\n", passThroughDefault); + } #endif /* ZSTD_NODECOMPRESS */ #ifndef ZSTD_NODICT - DISPLAYOUT( "\n"); - DISPLAYOUT( "Dictionary builder : \n"); - DISPLAYOUT( "--train ## : create a dictionary from a training set of files \n"); - DISPLAYOUT( "--train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] : use the cover algorithm with optional args \n"); - DISPLAYOUT( "--train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] : use the fast cover algorithm with optional args \n"); - DISPLAYOUT( "--train-legacy[=s=#] : use the legacy algorithm with selectivity (default: %u) \n", g_defaultSelectivityLevel); - DISPLAYOUT( " -o DICT : DICT is dictionary name (default: %s) \n", g_defaultDictName); - DISPLAYOUT( "--maxdict=# : limit dictionary to specified size (default: %u) \n", g_defaultMaxDictSize); - DISPLAYOUT( "--dictID=# : force dictionary ID to specified value (default: random) \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Dictionary builder:\n"); + DISPLAYOUT(" --train Create a dictionary from a training set of files.\n\n"); + DISPLAYOUT(" --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the cover algorithm (with optional arguments).\n"); + DISPLAYOUT(" --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the fast cover algorithm (with optional arguments).\n\n"); + DISPLAYOUT(" --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: %u]\n", g_defaultSelectivityLevel); + DISPLAYOUT(" -o NAME Use NAME as dictionary name. [Default: %s]\n", g_defaultDictName); + DISPLAYOUT(" --maxdict=# Limit dictionary to specified size #. [Default: %u]\n", g_defaultMaxDictSize); + DISPLAYOUT(" --dictID=# Force dictionary ID to #. [Default: Random]\n"); #endif #ifndef ZSTD_NOBENCH - DISPLAYOUT( "\n"); - DISPLAYOUT( "Benchmark arguments : \n"); - DISPLAYOUT( " -b# : benchmark file(s), using # compression level (default: %d) \n", ZSTDCLI_CLEVEL_DEFAULT); - DISPLAYOUT( " -e# : test all compression levels successively from -b# to -e# (default: 1) \n"); - DISPLAYOUT( " -i# : minimum evaluation time in seconds (default: 3s) \n"); - DISPLAYOUT( " -B# : cut file into independent blocks of size # (default: no block) \n"); - DISPLAYOUT( " -S : output one benchmark result per input file (default: consolidated result) \n"); - DISPLAYOUT( "--priority=rt : set process priority to real-time \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Benchmark options:\n"); + DISPLAYOUT(" -b# Perform benchmarking with compression level #. [Default: %d]\n", ZSTDCLI_CLEVEL_DEFAULT); + DISPLAYOUT(" -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1]\n"); + DISPLAYOUT(" -i# Set the minimum evaluation to time # seconds. [Default: 3]\n"); + DISPLAYOUT(" -B# Cut file into independent chunks of size #. [Default: No chunking]\n"); + DISPLAYOUT(" -S Output one benchmark result per input file. [Default: Consolidated result]\n"); + DISPLAYOUT(" --priority=rt Set process priority to real-time.\n"); #endif } @@ -296,18 +337,9 @@ static const char* lastNameFromPath(const char* path) return name; } -/*! exeNameMatch() : - @return : a non-zero value if exeName matches test, excluding the extension - */ -static int exeNameMatch(const char* exeName, const char* test) -{ - return !strncmp(exeName, test, strlen(test)) && - (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); -} - static void errorOut(const char* msg) { - DISPLAY("%s \n", msg); exit(1); + DISPLAYLEVEL(1, "%s \n", msg); exit(1); } /*! 
readU32FromCharChecked() : @@ -634,6 +666,11 @@ static void printVersion(void) #endif DISPLAYOUT("\n"); if (g_displayLevel >= 4) { + /* library versions */ + DISPLAYOUT("zlib version %s\n", FIO_zlibVersion()); + DISPLAYOUT("lz4 version %s\n", FIO_lz4Version()); + DISPLAYOUT("lzma version %s\n", FIO_lzmaVersion()); + /* posix support */ #ifdef _POSIX_C_SOURCE DISPLAYOUT("_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE); @@ -749,13 +786,13 @@ static unsigned init_nbThreads(void) { } else { \ argNb++; \ if (argNb >= argCount) { \ - DISPLAY("error: missing command argument \n"); \ + DISPLAYLEVEL(1, "error: missing command argument \n"); \ CLEAN_RETURN(1); \ } \ ptr = argv[argNb]; \ assert(ptr != NULL); \ if (ptr[0]=='-') { \ - DISPLAY("error: command cannot be separated from its argument by another command \n"); \ + DISPLAYLEVEL(1, "error: command cannot be separated from its argument by another command \n"); \ CLEAN_RETURN(1); \ } } } @@ -763,6 +800,18 @@ static unsigned init_nbThreads(void) { const char* __nb; \ NEXT_FIELD(__nb); \ val32 = readU32FromChar(&__nb); \ + if(*__nb != 0) { \ + errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \ + } \ +} + +#define NEXT_TSIZE(valTsize) { \ + const char* __nb; \ + NEXT_FIELD(__nb); \ + valTsize = readSizeTFromChar(&__nb); \ + if(*__nb != 0) { \ + errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \ + } \ } typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode; @@ -788,9 +837,7 @@ int main(int argCount, const char* argv[]) hasStdout = 0, ldmFlag = 0, main_pause = 0, - nbWorkers = 0, adapt = 0, - useRowMatchFinder = 0, adaptMin = MINCLEVEL, adaptMax = MAXCLEVEL, rsyncable = 0, @@ -799,18 +846,21 @@ int main(int argCount, const char* argv[]) separateFiles = 0, setRealTimePrio = 0, singleThread = 0, -#ifdef ZSTD_MULTITHREAD defaultLogicalCores = 0, -#endif showDefaultCParams = 0, ultra=0, - contentSize=1; + contentSize=1, + removeSrcFile=0; + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_ps_auto; + FIO_compressionType_t cType = FIO_zstdCompression; + unsigned nbWorkers = 0; double compressibility = 0.5; unsigned bench_nbSeconds = 3; /* would be better if this value was synchronized from bench */ size_t blockSize = 0; FIO_prefs_t* const prefs = FIO_createPreferences(); FIO_ctx_t* const fCtx = FIO_createContext(); + FIO_progressSetting_e progress = FIO_ps_auto; zstd_operation_mode operation = zom_compress; ZSTD_compressionParameters compressionParams; int cLevel = init_cLevel(); @@ -831,6 +881,7 @@ int main(int argCount, const char* argv[]) size_t streamSrcSize = 0; size_t targetCBlockSize = 0; size_t srcSizeHint = 0; + size_t nbInputFileNames = 0; int dictCLevel = g_defaultDictCLevel; unsigned dictSelect = g_defaultSelectivityLevel; #ifndef ZSTD_NODICT @@ -849,7 +900,7 @@ int main(int argCount, const char* argv[]) (void)recursive; (void)cLevelLast; /* not used when ZSTD_NOBENCH set */ (void)memLimit; assert(argCount >= 1); - if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAY("zstd: allocation error \n"); exit(1); } + if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAYLEVEL(1, "zstd: allocation error \n"); exit(1); } programName = lastNameFromPath(programName); #ifdef ZSTD_MULTITHREAD nbWorkers = init_nbThreads(); @@ -858,17 +909,20 @@ int main(int argCount, const char* argv[]) /* preset behaviors */ if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=0, singleThread=0; if 
(exeNameMatch(programName, ZSTD_UNZSTD)) operation=zom_decompress; - if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ - if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_GZ)) { suffix = GZ_EXTENSION; FIO_setCompressionType(prefs, FIO_gzipCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like gzip */ - if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; FIO_setRemoveSrcFile(prefs, 1); } /* behave like gunzip, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like lzma */ - if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like unlzma, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like xz */ - if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like unxz, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); } /* behave like lz4 */ - if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lz4Compression); } /* behave like unlz4, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ + if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + suffix = GZ_EXTENSION; cType = FIO_gzipCompression; removeSrcFile=1; + dictCLevel = cLevel = 6; /* gzip default is -6 */ + } + if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; removeSrcFile=1; } /* behave like gunzip, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; cType = FIO_lzmaCompression; removeSrcFile=1; } /* behave like lzma */ + if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; cType = FIO_lzmaCompression; removeSrcFile=1; } /* behave 
like unlzma, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; cType = FIO_xzCompression; removeSrcFile=1; } /* behave like xz */ + if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; cType = FIO_xzCompression; removeSrcFile=1; } /* behave like unxz, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; cType = FIO_lz4Compression; } /* behave like lz4 */ + if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; cType = FIO_lz4Compression; } /* behave like unlz4, also supports multiple formats */ memset(&compressionParams, 0, sizeof(compressionParams)); /* init crash handler */ @@ -905,43 +959,55 @@ int main(int argCount, const char* argv[]) if (!strcmp(argument, "--help")) { usage_advanced(programName); CLEAN_RETURN(0); } if (!strcmp(argument, "--verbose")) { g_displayLevel++; continue; } if (!strcmp(argument, "--quiet")) { g_displayLevel--; continue; } - if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; g_displayLevel-=(g_displayLevel==2); continue; } + if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; removeSrcFile=0; continue; } if (!strcmp(argument, "--ultra")) { ultra=1; continue; } if (!strcmp(argument, "--check")) { FIO_setChecksumFlag(prefs, 2); continue; } if (!strcmp(argument, "--no-check")) { FIO_setChecksumFlag(prefs, 0); continue; } if (!strcmp(argument, "--sparse")) { FIO_setSparseWrite(prefs, 2); continue; } if (!strcmp(argument, "--no-sparse")) { FIO_setSparseWrite(prefs, 0); continue; } + if (!strcmp(argument, "--pass-through")) { FIO_setPassThroughFlag(prefs, 1); continue; } + if (!strcmp(argument, "--no-pass-through")) { FIO_setPassThroughFlag(prefs, 0); continue; } if (!strcmp(argument, "--test")) { operation=zom_test; continue; } + if (!strcmp(argument, "--asyncio")) { FIO_setAsyncIOFlag(prefs, 1); continue;} + if (!strcmp(argument, "--no-asyncio")) { FIO_setAsyncIOFlag(prefs, 0); continue;} if (!strcmp(argument, "--train")) { operation=zom_train; if (outFileName==NULL) outFileName=g_defaultDictName; continue; } if (!strcmp(argument, "--no-dictID")) { FIO_setDictIDFlag(prefs, 0); continue; } - if (!strcmp(argument, "--keep")) { FIO_setRemoveSrcFile(prefs, 0); continue; } - if (!strcmp(argument, "--rm")) { FIO_setRemoveSrcFile(prefs, 1); continue; } + if (!strcmp(argument, "--keep")) { removeSrcFile=0; continue; } + if (!strcmp(argument, "--rm")) { removeSrcFile=1; continue; } if (!strcmp(argument, "--priority=rt")) { setRealTimePrio = 1; continue; } if (!strcmp(argument, "--show-default-cparams")) { showDefaultCParams = 1; continue; } if (!strcmp(argument, "--content-size")) { contentSize = 1; continue; } if (!strcmp(argument, "--no-content-size")) { contentSize = 0; continue; } if (!strcmp(argument, "--adapt")) { adapt = 1; continue; } - if (!strcmp(argument, "--no-row-match-finder")) { useRowMatchFinder = 1; continue; } - if (!strcmp(argument, "--row-match-finder")) { useRowMatchFinder = 2; continue; } + if (!strcmp(argument, "--no-row-match-finder")) { useRowMatchFinder = ZSTD_ps_disable; continue; } + if (!strcmp(argument, "--row-match-finder")) { useRowMatchFinder = ZSTD_ps_enable; continue; } if (longCommandWArg(&argument, "--adapt=")) { adapt = 1; if (!parseAdaptParameters(argument, &adaptMin, &adaptMax)) { badusage(programName); CLEAN_RETURN(1); } continue; } if (!strcmp(argument, "--single-thread")) { nbWorkers = 0; singleThread = 1; continue; } - if (!strcmp(argument, 
"--format=zstd")) { suffix = ZSTD_EXTENSION; FIO_setCompressionType(prefs, FIO_zstdCompression); continue; } + if (!strcmp(argument, "--format=zstd")) { suffix = ZSTD_EXTENSION; cType = FIO_zstdCompression; continue; } #ifdef ZSTD_GZCOMPRESS - if (!strcmp(argument, "--format=gzip")) { suffix = GZ_EXTENSION; FIO_setCompressionType(prefs, FIO_gzipCompression); continue; } + if (!strcmp(argument, "--format=gzip")) { suffix = GZ_EXTENSION; cType = FIO_gzipCompression; continue; } + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + if (!strcmp(argument, "--best")) { dictCLevel = cLevel = 9; continue; } + if (!strcmp(argument, "--no-name")) { /* ignore for now */; continue; } + } #endif #ifdef ZSTD_LZMACOMPRESS - if (!strcmp(argument, "--format=lzma")) { suffix = LZMA_EXTENSION; FIO_setCompressionType(prefs, FIO_lzmaCompression); continue; } - if (!strcmp(argument, "--format=xz")) { suffix = XZ_EXTENSION; FIO_setCompressionType(prefs, FIO_xzCompression); continue; } + if (!strcmp(argument, "--format=lzma")) { suffix = LZMA_EXTENSION; cType = FIO_lzmaCompression; continue; } + if (!strcmp(argument, "--format=xz")) { suffix = XZ_EXTENSION; cType = FIO_xzCompression; continue; } #endif #ifdef ZSTD_LZ4COMPRESS - if (!strcmp(argument, "--format=lz4")) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); continue; } + if (!strcmp(argument, "--format=lz4")) { suffix = LZ4_EXTENSION; cType = FIO_lz4Compression; continue; } #endif if (!strcmp(argument, "--rsyncable")) { rsyncable = 1; continue; } if (!strcmp(argument, "--compress-literals")) { literalCompressionMode = ZSTD_ps_enable; continue; } if (!strcmp(argument, "--no-compress-literals")) { literalCompressionMode = ZSTD_ps_disable; continue; } - if (!strcmp(argument, "--no-progress")) { FIO_setProgressSetting(FIO_ps_never); continue; } - if (!strcmp(argument, "--progress")) { FIO_setProgressSetting(FIO_ps_always); continue; } + if (!strcmp(argument, "--no-progress")) { progress = FIO_ps_never; continue; } + if (!strcmp(argument, "--progress")) { progress = FIO_ps_always; continue; } if (!strcmp(argument, "--exclude-compressed")) { FIO_setExcludeCompressedFile(prefs, 1); continue; } + if (!strcmp(argument, "--fake-stdin-is-console")) { UTIL_fakeStdinIsConsole(); continue; } + if (!strcmp(argument, "--fake-stdout-is-console")) { UTIL_fakeStdoutIsConsole(); continue; } + if (!strcmp(argument, "--fake-stderr-is-console")) { UTIL_fakeStderrIsConsole(); continue; } + if (!strcmp(argument, "--trace-file-stat")) { UTIL_traceFileStat(); continue; } /* long commands with arguments */ #ifndef ZSTD_NODICT @@ -983,15 +1049,21 @@ int main(int argCount, const char* argv[]) if (longCommandWArg(&argument, "--memlimit")) { NEXT_UINT32(memLimit); continue; } if (longCommandWArg(&argument, "--memory")) { NEXT_UINT32(memLimit); continue; } if (longCommandWArg(&argument, "--memlimit-decompress")) { NEXT_UINT32(memLimit); continue; } - if (longCommandWArg(&argument, "--block-size=")) { blockSize = readSizeTFromChar(&argument); continue; } + if (longCommandWArg(&argument, "--block-size")) { NEXT_TSIZE(blockSize); continue; } if (longCommandWArg(&argument, "--maxdict")) { NEXT_UINT32(maxDictSize); continue; } if (longCommandWArg(&argument, "--dictID")) { NEXT_UINT32(dictID); continue; } - if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) { badusage(programName); CLEAN_RETURN(1); } continue; } - if (longCommandWArg(&argument, "--stream-size=")) { streamSrcSize = 
readSizeTFromChar(&argument); continue; } - if (longCommandWArg(&argument, "--target-compressed-block-size=")) { targetCBlockSize = readSizeTFromChar(&argument); continue; } - if (longCommandWArg(&argument, "--size-hint=")) { srcSizeHint = readSizeTFromChar(&argument); continue; } - if (longCommandWArg(&argument, "--output-dir-flat")) { NEXT_FIELD(outDirName); continue; } -#ifdef ZSTD_MULTITHREAD + if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) { badusage(programName); CLEAN_RETURN(1); } ; cType = FIO_zstdCompression; continue; } + if (longCommandWArg(&argument, "--stream-size")) { NEXT_TSIZE(streamSrcSize); continue; } + if (longCommandWArg(&argument, "--target-compressed-block-size")) { NEXT_TSIZE(targetCBlockSize); continue; } + if (longCommandWArg(&argument, "--size-hint")) { NEXT_TSIZE(srcSizeHint); continue; } + if (longCommandWArg(&argument, "--output-dir-flat")) { + NEXT_FIELD(outDirName); + if (strlen(outDirName) == 0) { + DISPLAYLEVEL(1, "error: output dir cannot be empty string (did you mean to pass '.' instead?)\n"); + CLEAN_RETURN(1); + } + continue; + } if (longCommandWArg(&argument, "--auto-threads")) { const char* threadDefault = NULL; NEXT_FIELD(threadDefault); @@ -999,9 +1071,15 @@ int main(int argCount, const char* argv[]) defaultLogicalCores = 1; continue; } -#endif #ifdef UTIL_HAS_MIRRORFILELIST - if (longCommandWArg(&argument, "--output-dir-mirror")) { NEXT_FIELD(outMirroredDirName); continue; } + if (longCommandWArg(&argument, "--output-dir-mirror")) { + NEXT_FIELD(outMirroredDirName); + if (strlen(outMirroredDirName) == 0) { + DISPLAYLEVEL(1, "error: output dir cannot be empty string (did you mean to pass '.' instead?)\n"); + CLEAN_RETURN(1); + } + continue; + } #endif #ifndef ZSTD_NOTRACE if (longCommandWArg(&argument, "--trace")) { char const* traceFile; NEXT_FIELD(traceFile); TRACE_enable(traceFile); continue; } @@ -1018,6 +1096,8 @@ int main(int argCount, const char* argv[]) /* Invalid character following --long */ badusage(programName); CLEAN_RETURN(1); + } else { + ldmWindowLog = g_defaultMaxWindowLog; } /* Only set windowLog if not already set by --zstd */ if (compressionParams.windowLog == 0) @@ -1075,8 +1155,8 @@ int main(int argCount, const char* argv[]) { /* Display help */ case 'V': printVersion(); CLEAN_RETURN(0); /* Version Only */ - case 'H': - case 'h': usage_advanced(programName); CLEAN_RETURN(0); + case 'H': usage_advanced(programName); CLEAN_RETURN(0); + case 'h': usage(stdout, programName); CLEAN_RETURN(0); /* Compress */ case 'z': operation=zom_compress; argument++; break; @@ -1090,7 +1170,10 @@ int main(int argCount, const char* argv[]) operation=zom_decompress; argument++; break; /* Force stdout, even if stdout==console */ - case 'c': forceStdout=1; outFileName=stdoutmark; argument++; break; + case 'c': forceStdout=1; outFileName=stdoutmark; removeSrcFile=0; argument++; break; + + /* do not store filename - gzip compatibility - nothing to do */ + case 'n': argument++; break; /* Use file content as dictionary */ case 'D': argument++; NEXT_FIELD(dictFileName); break; @@ -1105,7 +1188,7 @@ int main(int argCount, const char* argv[]) case 'q': g_displayLevel--; argument++; break; /* keep source file (default) */ - case 'k': FIO_setRemoveSrcFile(prefs, 0); argument++; break; + case 'k': removeSrcFile=0; argument++; break; /* Checksum */ case 'C': FIO_setChecksumFlag(prefs, 2); argument++; break; @@ -1164,7 +1247,7 @@ int main(int argCount, const char* argv[]) /* nb of threads (hidden 
option) */ case 'T': argument++; - nbWorkers = (int)readU32FromChar(&argument); + nbWorkers = readU32FromChar(&argument); break; /* Dictionary Selection level */ @@ -1204,18 +1287,21 @@ int main(int argCount, const char* argv[]) DISPLAYLEVEL(3, WELCOME_MESSAGE); #ifdef ZSTD_MULTITHREAD + if ((operation==zom_decompress) && (!singleThread) && (nbWorkers > 1)) { + DISPLAYLEVEL(2, "Warning : decompression does not support multi-threading\n"); + } if ((nbWorkers==0) && (!singleThread)) { /* automatically set # workers based on # of reported cpus */ if (defaultLogicalCores) { - nbWorkers = UTIL_countLogicalCores(); + nbWorkers = (unsigned)UTIL_countLogicalCores(); DISPLAYLEVEL(3, "Note: %d logical core(s) detected \n", nbWorkers); } else { - nbWorkers = UTIL_countPhysicalCores(); + nbWorkers = (unsigned)UTIL_countPhysicalCores(); DISPLAYLEVEL(3, "Note: %d physical core(s) detected \n", nbWorkers); } } #else - (void)singleThread; (void)nbWorkers; + (void)singleThread; (void)nbWorkers; (void)defaultLogicalCores; #endif g_utilDisplayLevel = g_displayLevel; @@ -1251,6 +1337,8 @@ int main(int argCount, const char* argv[]) } } + nbInputFileNames = filenames->tableSize; /* saving number of input files */ + if (recursive) { /* at this stage, filenameTable is a list of paths, which can contain both files and directories */ UTIL_expandFNT(&filenames, followLinks); } @@ -1263,7 +1351,7 @@ int main(int argCount, const char* argv[]) int const ret = FIO_listMultipleFiles((unsigned)filenames->tableSize, filenames->fileNames, g_displayLevel); CLEAN_RETURN(ret); #else - DISPLAY("file information is not supported \n"); + DISPLAYLEVEL(1, "file information is not supported \n"); CLEAN_RETURN(1); #endif } @@ -1271,14 +1359,18 @@ int main(int argCount, const char* argv[]) /* Check if benchmark is selected */ if (operation==zom_bench) { #ifndef ZSTD_NOBENCH + if (cType != FIO_zstdCompression) { + DISPLAYLEVEL(1, "benchmark mode is only compatible with zstd format \n"); + CLEAN_RETURN(1); + } benchParams.blockSize = blockSize; - benchParams.nbWorkers = nbWorkers; + benchParams.nbWorkers = (int)nbWorkers; benchParams.realTime = (unsigned)setRealTimePrio; benchParams.nbSeconds = bench_nbSeconds; benchParams.ldmFlag = ldmFlag; benchParams.ldmMinMatch = (int)g_ldmMinMatch; benchParams.ldmHashLog = (int)g_ldmHashLog; - benchParams.useRowMatchFinder = useRowMatchFinder; + benchParams.useRowMatchFinder = (int)useRowMatchFinder; if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) { benchParams.ldmBucketSizeLog = (int)g_ldmBucketSizeLog; } @@ -1299,15 +1391,18 @@ int main(int argCount, const char* argv[]) int c; DISPLAYLEVEL(3, "Benchmarking %s \n", filenames->fileNames[i]); for(c = cLevel; c <= cLevelLast; c++) { - BMK_benchFilesAdvanced(&filenames->fileNames[i], 1, dictFileName, c, &compressionParams, g_displayLevel, &benchParams); + BMK_benchOutcome_t const bo = BMK_benchFilesAdvanced(&filenames->fileNames[i], 1, dictFileName, c, &compressionParams, g_displayLevel, &benchParams); + if (!BMK_isSuccessful_benchOutcome(bo)) return 1; } } } else { for(; cLevel <= cLevelLast; cLevel++) { - BMK_benchFilesAdvanced(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, &compressionParams, g_displayLevel, &benchParams); + BMK_benchOutcome_t const bo = BMK_benchFilesAdvanced(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, &compressionParams, g_displayLevel, &benchParams); + if (!BMK_isSuccessful_benchOutcome(bo)) return 1; } } } else { for(; cLevel <= cLevelLast; cLevel++) { - 
BMK_syntheticTest(cLevel, compressibility, &compressionParams, g_displayLevel, &benchParams); + BMK_benchOutcome_t const bo = BMK_syntheticTest(cLevel, compressibility, &compressionParams, g_displayLevel, &benchParams); + if (!BMK_isSuccessful_benchOutcome(bo)) return 1; } } #else @@ -1349,24 +1444,34 @@ int main(int argCount, const char* argv[]) } #ifndef ZSTD_NODECOMPRESS - if (operation==zom_test) { FIO_setTestMode(prefs, 1); outFileName=nulmark; FIO_setRemoveSrcFile(prefs, 0); } /* test mode */ + if (operation==zom_test) { FIO_setTestMode(prefs, 1); outFileName=nulmark; removeSrcFile=0; } /* test mode */ #endif /* No input filename ==> use stdin and stdout */ - if (filenames->tableSize == 0) UTIL_refFilename(filenames, stdinmark); - if (!strcmp(filenames->fileNames[0], stdinmark) && !outFileName) + if (filenames->tableSize == 0) { + /* It is possible that the input + was a number of empty directories. In this case + stdin and stdout should not be used */ + if (nbInputFileNames > 0 ){ + DISPLAYLEVEL(1, "please provide correct input file(s) or non-empty directories -- ignored \n"); + CLEAN_RETURN(0); + } + UTIL_refFilename(filenames, stdinmark); + } + + if (filenames->tableSize == 1 && !strcmp(filenames->fileNames[0], stdinmark) && !outFileName) outFileName = stdoutmark; /* when input is stdin, default output is stdout */ /* Check if input/output defined as console; trigger an error in this case */ if (!forceStdin - && !strcmp(filenames->fileNames[0], stdinmark) - && IS_CONSOLE(stdin) ) { + && (UTIL_searchFileNamesTable(filenames, stdinmark) != -1) + && UTIL_isConsole(stdin) ) { DISPLAYLEVEL(1, "stdin is a console, aborting\n"); CLEAN_RETURN(1); } - if ( outFileName && !strcmp(outFileName, stdoutmark) - && IS_CONSOLE(stdout) - && !strcmp(filenames->fileNames[0], stdinmark) + if ( (!outFileName || !strcmp(outFileName, stdoutmark)) + && UTIL_isConsole(stdout) + && (UTIL_searchFileNamesTable(filenames, stdinmark) != -1) && !forceStdout && operation!=zom_decompress ) { DISPLAYLEVEL(1, "stdout is a console, aborting\n"); @@ -1384,25 +1489,35 @@ int main(int argCount, const char* argv[]) if (showDefaultCParams) { if (operation == zom_decompress) { - DISPLAY("error : can't use --show-default-cparams in decomrpession mode \n"); + DISPLAYLEVEL(1, "error : can't use --show-default-cparams in decompression mode \n"); CLEAN_RETURN(1); } } if (dictFileName != NULL && patchFromDictFileName != NULL) { - DISPLAY("error : can't use -D and --patch-from=# at the same time \n"); + DISPLAYLEVEL(1, "error : can't use -D and --patch-from=# at the same time \n"); CLEAN_RETURN(1); } if (patchFromDictFileName != NULL && filenames->tableSize > 1) { - DISPLAY("error : can't use --patch-from=# on multiple files \n"); + DISPLAYLEVEL(1, "error : can't use --patch-from=# on multiple files \n"); CLEAN_RETURN(1); } - /* No status message in pipe mode (stdin - stdout) */ + /* No status message by default when output is stdout */ hasStdout = outFileName && !strcmp(outFileName,stdoutmark); + if (hasStdout && (g_displayLevel==2)) g_displayLevel=1; + + /* when stderr is not the console, do not pollute it with progress updates (unless requested) */ + if (!UTIL_isConsole(stderr) && (progress!=FIO_ps_always)) progress=FIO_ps_never; + FIO_setProgressSetting(progress); - if ((hasStdout || !IS_CONSOLE(stderr)) && (g_displayLevel==2)) g_displayLevel=1; + /* don't remove source files when output is stdout */; + if (hasStdout && removeSrcFile) { + DISPLAYLEVEL(3, "Note: src files are not removed when output is stdout \n"); + 
removeSrcFile = 0; + } + FIO_setRemoveSrcFile(prefs, removeSrcFile); /* IO Stream/File */ FIO_setHasStdoutOutput(fCtx, hasStdout); @@ -1422,8 +1537,9 @@ int main(int argCount, const char* argv[]) FIO_setMemLimit(prefs, memLimit); if (operation==zom_compress) { #ifndef ZSTD_NOCOMPRESS + FIO_setCompressionType(prefs, cType); FIO_setContentSize(prefs, contentSize); - FIO_setNbWorkers(prefs, nbWorkers); + FIO_setNbWorkers(prefs, (int)nbWorkers); FIO_setBlockSize(prefs, (int)blockSize); if (g_overlapLog!=OVERLAP_LOG_DEFAULT) FIO_setOverlapLog(prefs, (int)g_overlapLog); FIO_setLdmFlag(prefs, (unsigned)ldmFlag); @@ -1431,8 +1547,8 @@ int main(int argCount, const char* argv[]) FIO_setLdmMinMatch(prefs, (int)g_ldmMinMatch); if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) FIO_setLdmBucketSizeLog(prefs, (int)g_ldmBucketSizeLog); if (g_ldmHashRateLog != LDM_PARAM_DEFAULT) FIO_setLdmHashRateLog(prefs, (int)g_ldmHashRateLog); - FIO_setAdaptiveMode(prefs, (unsigned)adapt); - FIO_setUseRowMatchFinder(prefs, useRowMatchFinder); + FIO_setAdaptiveMode(prefs, adapt); + FIO_setUseRowMatchFinder(prefs, (int)useRowMatchFinder); FIO_setAdaptMin(prefs, adaptMin); FIO_setAdaptMax(prefs, adaptMax); FIO_setRsyncable(prefs, rsyncable); @@ -1440,6 +1556,7 @@ int main(int argCount, const char* argv[]) FIO_setTargetCBlockSize(prefs, targetCBlockSize); FIO_setSrcSizeHint(prefs, srcSizeHint); FIO_setLiteralCompressionMode(prefs, literalCompressionMode); + FIO_setSparseWrite(prefs, 0); if (adaptMin > cLevel) cLevel = adaptMin; if (adaptMax < cLevel) cLevel = adaptMax; @@ -1465,8 +1582,12 @@ int main(int argCount, const char* argv[]) else operationResult = FIO_compressMultipleFilenames(fCtx, prefs, filenames->fileNames, outMirroredDirName, outDirName, outFileName, suffix, dictFileName, cLevel, compressionParams); #else - (void)contentSize; (void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; (void)ZSTD_strategyMap; (void)useRowMatchFinder; /* not used when ZSTD_NOCOMPRESS set */ - DISPLAY("Compression not supported \n"); + /* these variables are only used when compression mode is enabled */ + (void)contentSize; (void)suffix; (void)adapt; (void)rsyncable; + (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; + (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; + (void)ZSTD_strategyMap; (void)useRowMatchFinder; (void)cType; + DISPLAYLEVEL(1, "Compression not supported \n"); #endif } else { /* decompression or test */ #ifndef ZSTD_NODECOMPRESS @@ -1476,7 +1597,7 @@ int main(int argCount, const char* argv[]) operationResult = FIO_decompressMultipleFilenames(fCtx, prefs, filenames->fileNames, outMirroredDirName, outDirName, outFileName, dictFileName); } #else - DISPLAY("Decompression not supported \n"); + DISPLAYLEVEL(1, "Decompression not supported \n"); #endif } diff --git a/programs/zstdcli_trace.c b/programs/zstdcli_trace.c index b3b977feb53..35075a52c4d 100644 --- a/programs/zstdcli_trace.c +++ b/programs/zstdcli_trace.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/zstdcli_trace.h b/programs/zstdcli_trace.h index 38c27dc04c4..9c135d3ca84 100644 --- a/programs/zstdcli_trace.h +++ b/programs/zstdcli_trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. 
+ * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/zstdgrep.1 b/programs/zstdgrep.1 index 563696d339e..77d29b4bfc5 100644 --- a/programs/zstdgrep.1 +++ b/programs/zstdgrep.1 @@ -1,26 +1,17 @@ -. -.TH "ZSTDGREP" "1" "January 2022" "zstd 1.5.2" "User Commands" -. +.TH "ZSTDGREP" "1" "February 2023" "zstd 1.5.4" "User Commands" .SH "NAME" \fBzstdgrep\fR \- print lines matching a pattern in zstandard\-compressed files -. .SH "SYNOPSIS" -\fBzstdgrep\fR [\fIgrep\-flags\fR] [\-\-] \fIpattern\fR [\fIfiles\fR \.\.\.] -. +\fBzstdgrep\fR [\fIgrep\-flags\fR] [\-\-] \fIpattern\fR [\fIfiles\fR \|\.\|\.\|\.] .SH "DESCRIPTION" -\fBzstdgrep\fR runs \fBgrep (1)\fR on files, or \fBstdin\fR if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. -. +\fBzstdgrep\fR runs \fBgrep\fR(1) on files, or \fBstdin\fR if no files argument is given, after decompressing them with \fBzstdcat\fR(1)\. .P -The grep\-flags and pattern arguments are passed on to \fBgrep (1)\fR\. If an \fB\-e\fR flag is found in the \fBgrep\-flags\fR, \fBzstdgrep\fR will not look for a pattern argument\. -. +The \fIgrep\-flags\fR and \fIpattern\fR arguments are passed on to \fBgrep\fR(1)\. If an \fB\-e\fR flag is found in the \fIgrep\-flags\fR, \fBzstdgrep\fR will not look for a \fIpattern\fR argument\. .P -Note that modern \fBgrep\fR alternatives such as \fBripgrep\fR (\fBrg\fR) support \fBzstd\fR\-compressed files out of the box, and can prove better alternatives than \fBzstdgrep\fR notably for unsupported complex pattern searches\. Note though that such alternatives may also feature some minor command line differences\. -. +Note that modern \fBgrep\fR alternatives such as \fBripgrep\fR (\fBrg\fR(1)) support \fBzstd\fR\-compressed files out of the box, and can prove better alternatives than \fBzstdgrep\fR notably for unsupported complex pattern searches\. Note though that such alternatives may also feature some minor command line differences\. .SH "EXIT STATUS" In case of missing arguments or missing pattern, 1 will be returned, otherwise 0\. -. .SH "SEE ALSO" -\fBzstd (1)\fR -. +\fBzstd\fR(1) .SH "AUTHORS" Thomas Klausner \fIwiz@NetBSD\.org\fR diff --git a/programs/zstdgrep.1.md b/programs/zstdgrep.1.md index 35186a4bf02..6370a81c7ca 100644 --- a/programs/zstdgrep.1.md +++ b/programs/zstdgrep.1.md @@ -4,16 +4,16 @@ zstdgrep(1) -- print lines matching a pattern in zstandard-compressed files SYNOPSIS -------- -`zstdgrep` [*grep-flags*] [--] _pattern_ [_files_ ...] +`zstdgrep` [] [--] [ ...] DESCRIPTION ----------- -`zstdgrep` runs `grep (1)` on files, or `stdin` if no files argument is given, after decompressing them with `zstdcat (1)`. +`zstdgrep` runs `grep`(1) on files, or `stdin` if no files argument is given, after decompressing them with `zstdcat`(1). -The grep-flags and pattern arguments are passed on to `grep (1)`. If an `-e` flag is found in the `grep-flags`, `zstdgrep` will not look for a pattern argument. +The and arguments are passed on to `grep`(1). If an `-e` flag is found in the , `zstdgrep` will not look for a argument. -Note that modern `grep` alternatives such as `ripgrep` (`rg`) support `zstd`-compressed files out of the box, +Note that modern `grep` alternatives such as `ripgrep` (`rg`(1)) support `zstd`-compressed files out of the box, and can prove better alternatives than `zstdgrep` notably for unsupported complex pattern searches. 
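For illustration, two hedged `zstdgrep` invocations matching the behavior described above (file names are hypothetical; the flags are ordinary `grep`(1) flags that are simply forwarded):

    # case-insensitive search in a compressed log
    zstdgrep -i 'timeout' server.log.zst
    # with -e given, zstdgrep does not expect a separate pattern argument
    zstdgrep -c -e 'GET' -e 'POST' access.log.zst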
Note though that such alternatives may also feature some minor command line differences. @@ -23,7 +23,7 @@ In case of missing arguments or missing pattern, 1 will be returned, otherwise 0 SEE ALSO -------- -`zstd (1)` +`zstd`(1) AUTHORS ------- diff --git a/programs/zstdless b/programs/zstdless index 893799e7d95..17726a4f6e1 100755 --- a/programs/zstdless +++ b/programs/zstdless @@ -1,2 +1,8 @@ #!/bin/sh -zstdcat "$@" | less + +zstd=${ZSTD:-zstd} + +# TODO: Address quirks and bugs tied to old versions of less, provide a mechanism to pass flags directly to zstd + +export LESSOPEN="|-${zstd} -cdfq %s" +exec less "$@" diff --git a/programs/zstdless.1 b/programs/zstdless.1 index ab38e7a7f45..6c82b9c9e26 100644 --- a/programs/zstdless.1 +++ b/programs/zstdless.1 @@ -1,14 +1,9 @@ -. -.TH "ZSTDLESS" "1" "January 2022" "zstd 1.5.2" "User Commands" -. +.TH "ZSTDLESS" "1" "February 2023" "zstd 1.5.4" "User Commands" .SH "NAME" \fBzstdless\fR \- view zstandard\-compressed files -. .SH "SYNOPSIS" -\fBzstdless\fR [\fIflags\fR] [\fIfile\fR \.\.\.] -. +\fBzstdless\fR [\fIflags\fR] [\fIfile\fR \|\.\|\.\|\.] .SH "DESCRIPTION" -\fBzstdless\fR runs \fBless (1)\fR on files or stdin, if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. -. +\fBzstdless\fR runs \fBless\fR(1) on files or stdin, if no \fIfile\fR argument is given, after decompressing them with \fBzstdcat\fR(1)\. .SH "SEE ALSO" -\fBzstd (1)\fR +\fBzstd\fR(1) diff --git a/programs/zstdless.1.md b/programs/zstdless.1.md index d91d48abcc7..67c1c76769b 100644 --- a/programs/zstdless.1.md +++ b/programs/zstdless.1.md @@ -4,13 +4,13 @@ zstdless(1) -- view zstandard-compressed files SYNOPSIS -------- -`zstdless` [*flags*] [_file_ ...] +`zstdless` [] [ ...] DESCRIPTION ----------- -`zstdless` runs `less (1)` on files or stdin, if no files argument is given, after decompressing them with `zstdcat (1)`. +`zstdless` runs `less`(1) on files or stdin, if no argument is given, after decompressing them with `zstdcat`(1). SEE ALSO -------- -`zstd (1)` +`zstd`(1) diff --git a/tests/.gitignore b/tests/.gitignore index 9a6939a5766..fcb865d61ec 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -12,6 +12,7 @@ zstreamtest zstreamtest32 zstreamtest_asan zstreamtest_tsan +zstreamtest_ubsan zstreamtest-dll datagen paramgrill diff --git a/tests/DEPRECATED-test-zstd-speed.py b/tests/DEPRECATED-test-zstd-speed.py index 665e0a7f91a..71d75b8ccce 100755 --- a/tests/DEPRECATED-test-zstd-speed.py +++ b/tests/DEPRECATED-test-zstd-speed.py @@ -2,7 +2,7 @@ # THIS BENCHMARK IS BEING REPLACED BY automated-bencmarking.py # ################################################################ -# Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/tests/Makefile b/tests/Makefile index 132fa7a0818..3eed19ea80c 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,6 +1,6 @@ - # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -37,7 +37,7 @@ TESTARTEFACT := versionsTest DEBUGFLAGS += -g -Wno-c++-compat CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \ -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) \ - -DZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY=1 + -DZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY=1 ZSTDCOMMON_FILES := $(sort $(ZSTD_COMMON_FILES)) ZSTDCOMP_FILES := $(sort $(ZSTD_COMPRESS_FILES)) @@ -134,14 +134,17 @@ zstdmt_d_%.o : $(ZSTDDIR)/decompress/%.c zstdmt_d_%.o : $(ZSTDDIR)/decompress/%.S $(CC) -c $(CPPFLAGS) $(ASFLAGS) $< -o $@ +FULLBENCHS := fullbench fullbench32 +CLEAN += $(FULLBENCHS) fullbench32: CPPFLAGS += -m32 -fullbench fullbench32 : CPPFLAGS += $(MULTITHREAD_CPP) -Wno-deprecated-declarations -fullbench fullbench32 : LDFLAGS += $(MULTITHREAD_LD) -fullbench fullbench32 : DEBUGFLAGS = -DNDEBUG # turn off assert() for speed measurements -fullbench fullbench32 : $(ZSTD_FILES) -fullbench fullbench32 : $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c fullbench.c +$(FULLBENCHS) : CPPFLAGS += $(MULTITHREAD_CPP) -Wno-deprecated-declarations +$(FULLBENCHS) : LDFLAGS += $(MULTITHREAD_LD) +$(FULLBENCHS) : DEBUGFLAGS = -DNDEBUG # turn off assert() for speed measurements +$(FULLBENCHS) : $(ZSTD_FILES) +$(FULLBENCHS) : $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c fullbench.c $(LINK.c) $^ -o $@$(EXT) +CLEAN += fullbench-lib fullbench-lib : CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ fullbench-lib : $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c $(ZSTDDIR)/libzstd.a fullbench.c $(LINK.c) $^ -o $@$(EXT) @@ -151,6 +154,7 @@ fullbench-dll: $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/benchfn.c $(PRGDIR # $(CC) $(FLAGS) $(filter %.c,$^) -o $@$(EXT) -DZSTD_DLL_IMPORT=1 $(ZSTDDIR)/dll/libzstd.dll $(LINK.c) $^ $(LDLIBS) -o $@$(EXT) +CLEAN += fuzzer fuzzer32 fuzzer : CPPFLAGS += $(MULTITHREAD_CPP) -Wno-deprecated-declarations fuzzer : LDFLAGS += $(MULTITHREAD_LD) fuzzer : $(ZSTDMT_OBJECTS) @@ -164,7 +168,8 @@ fuzzer32 : $(ZSTD_FILES) fuzzer-dll : $(ZSTDDIR)/common/xxhash.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/datagen.c fuzzer.c $(CC) $(CPPFLAGS) $(CFLAGS) $(filter %.c,$^) $(LDFLAGS) -o $@$(EXT) -ZSTREAM_LOCAL_FILES := $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c seqgen.c zstreamtest.c +CLEAN += zstreamtest zstreamtest32 +ZSTREAM_LOCAL_FILES := $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c seqgen.c zstreamtest.c external_matchfinder.c ZSTREAM_PROPER_FILES := $(ZDICT_FILES) $(ZSTREAM_LOCAL_FILES) ZSTREAMFILES := $(ZSTD_FILES) $(ZSTREAM_PROPER_FILES) zstreamtest32 : CFLAGS += -m32 @@ -175,42 +180,58 @@ zstreamtest32 : $(ZSTREAMFILES) zstreamtest zstreamtest32 : $(LINK.c) $^ -o $@$(EXT) +CLEAN += zstreamtest_asan zstreamtest_asan : CFLAGS += -fsanitize=address zstreamtest_asan : $(ZSTREAMFILES) $(LINK.c) $(MULTITHREAD) $^ -o $@$(EXT) +CLEAN += zstreamtest_tsan zstreamtest_tsan : CFLAGS += -fsanitize=thread zstreamtest_tsan : $(ZSTREAMFILES) $(LINK.c) $(MULTITHREAD) $^ -o $@$(EXT) +CLEAN += zstreamtest_ubsan +zstreamtest_ubsan : CFLAGS += -fsanitize=undefined +zstreamtest_ubsan : $(ZSTREAMFILES) + $(LINK.c) $(MULTITHREAD) $^ -o $@$(EXT) + # note : broken : requires symbols unavailable from dynamic library zstreamtest-dll : $(ZSTDDIR)/common/xxhash.c # xxh symbols not exposed from dll zstreamtest-dll : $(ZSTREAM_LOCAL_FILES) $(CC) $(CPPFLAGS) $(CFLAGS) $(filter %.c,$^) $(LDFLAGS) -o $@$(EXT) +CLEAN += paramgrill 
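As a usage sketch for the new sanitizer target registered above (assuming the repository's usual layout, with this Makefile living in `tests/`, and reusing flags that other targets in this Makefile already pass to `zstreamtest`):

    # build and run the UBSan-instrumented stream tester
    make -C tests zstreamtest_ubsan
    ./tests/zstreamtest_ubsan -v -t1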
paramgrill : DEBUGFLAGS = # turn off debug for speed measurements paramgrill : LDLIBS += -lm paramgrill : $(ZSTD_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c $(PRGDIR)/benchzstd.c $(PRGDIR)/datagen.c paramgrill.c +CLEAN += datagen datagen : $(PRGDIR)/datagen.c datagencli.c $(LINK.c) $^ -o $@$(EXT) +CLEAN += roundTripCrash roundTripCrash: CFLAGS += $(MULTITHREAD) roundTripCrash : $(ZSTD_OBJECTS) roundTripCrash.c +CLEAN += longmatch longmatch : $(ZSTD_OBJECTS) longmatch.c +CLEAN += bigdict bigdict: CFLAGS += $(MULTITHREAD) bigdict: $(ZSTDMT_OBJECTS) $(PRGDIR)/datagen.c bigdict.c +CLEAN += invalidDictionaries invalidDictionaries : $(ZSTD_OBJECTS) invalidDictionaries.c +CLEAN += legacy legacy : CPPFLAGS += -I$(ZSTDDIR)/legacy -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=4 legacy : $(ZSTD_FILES) $(sort $(wildcard $(ZSTDDIR)/legacy/*.c)) legacy.c +CLEAN += decodecorpus decodecorpus : LDLIBS += -lm decodecorpus : $(filter-out zstdc_zstd_compress.o, $(ZSTD_OBJECTS)) $(ZDICT_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c decodecorpus.c +CLEAN += poolTests poolTests : $(PRGDIR)/util.c $(PRGDIR)/timefn.c poolTests.c $(ZSTDDIR)/common/pool.c $(ZSTDDIR)/common/threading.c $(ZSTDDIR)/common/zstd_common.c $(ZSTDDIR)/common/error_private.c $(LINK.c) $(MULTITHREAD) $^ -o $@$(EXT) @@ -222,7 +243,8 @@ versionsTest: clean automated_benchmarking: clean $(PYTHON) automated_benchmarking.py -# make checkTag +# make checkTag : check that release tag corresponds to release version +CLEAN += checkTag checkTag.o : $(ZSTDDIR)/zstd.h .PHONY: clean @@ -231,28 +253,22 @@ clean: $(MAKE) -C $(PRGDIR) clean $(RM) -fR $(TESTARTEFACT) $(RM) -rf tmp* # some test directories are named tmp* - $(RM) core *.o *.tmp result* *.gcda dictionary *.zst \ + $(RM) $(CLEAN) core *.o *.tmp result* *.gcda dictionary *.zst \ $(PRGDIR)/zstd$(EXT) $(PRGDIR)/zstd32$(EXT) \ - fullbench$(EXT) fullbench32$(EXT) \ - fullbench-lib$(EXT) fullbench-dll$(EXT) \ - fuzzer$(EXT) fuzzer32$(EXT) \ - fuzzer-dll$(EXT) zstreamtest-dll$(EXT) \ - zstreamtest$(EXT) zstreamtest32$(EXT) \ - datagen$(EXT) paramgrill$(EXT) roundTripCrash$(EXT) longmatch$(EXT) \ - symbols$(EXT) invalidDictionaries$(EXT) legacy$(EXT) poolTests$(EXT) \ - decodecorpus$(EXT) checkTag$(EXT) bigdict$(EXT) + fullbench-dll$(EXT) fuzzer-dll$(EXT) zstreamtest-dll$(EXT) @echo Cleaning completed #---------------------------------------------------------------------------------- -# valgrind tests are validated only for some posix platforms +# valgrind tests validated only for some posix platforms #---------------------------------------------------------------------------------- UNAME := $(shell uname) ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS AIX)) HOST_OS = POSIX -valgrindTest: VALGRIND = valgrind --leak-check=full --show-leak-kinds=all --error-exitcode=1 -valgrindTest: zstd datagen fuzzer fullbench +.PHONY: test-valgrind +test-valgrind: VALGRIND = valgrind --leak-check=full --show-leak-kinds=all --error-exitcode=1 +test-valgrind: zstd datagen fuzzer fullbench @echo "\n ---- valgrind tests : memory analyzer ----" $(VALGRIND) ./datagen -g50M > $(VOID) $(VALGRIND) $(PRGDIR)/zstd ; if [ $$? 
-eq 0 ] ; then echo "zstd without argument should have failed"; false; fi @@ -261,7 +277,7 @@ valgrindTest: zstd datagen fuzzer fullbench ./datagen -g2930KB | $(VALGRIND) $(PRGDIR)/zstd -5 -vf - -o tmp $(VALGRIND) $(PRGDIR)/zstd -vdf tmp -c > $(VOID) ./datagen -g64MB | $(VALGRIND) $(PRGDIR)/zstd -vf - -c > $(VOID) - @rm tmp + $(RM) tmp $(VALGRIND) ./fuzzer -T1mn -t1 $(VALGRIND) ./fullbench -i1 @@ -297,7 +313,7 @@ check: shortest fuzztest: test-fuzzer test-zstream test-decodecorpus .PHONY: test -test: test-zstd test-fullbench test-fuzzer test-zstream test-invalidDictionaries test-legacy test-decodecorpus +test: test-zstd test-fullbench test-fuzzer test-zstream test-invalidDictionaries test-legacy test-decodecorpus test-cli-tests ifeq ($(QEMU_SYS),) test: test-pool endif @@ -306,9 +322,9 @@ endif test32: test-zstd32 test-fullbench32 test-fuzzer32 test-zstream32 .PHONY: test-all -test-all: test test32 valgrindTest test-decodecorpus-cli +test-all: test test32 test-decodecorpus-cli -.PHONY: test-zstd test-zstd32 test-zstd-nolegacy test-zstdgrep +.PHONY: test-zstd test-zstd32 test-zstd-nolegacy test-zstd: ZSTD = $(PRGDIR)/zstd test-zstd: zstd @@ -322,29 +338,42 @@ test-zstd test-zstd32 test-zstd-nolegacy: datagen file $(ZSTD) EXE_PREFIX="$(QEMU_SYS)" ZSTD_BIN="$(ZSTD)" DATAGEN_BIN=./datagen ./playTests.sh $(ZSTDRTTEST) +.PHONY: test-cli-tests +test-cli-tests: ZSTD = $(PRGDIR)/zstd +test-cli-tests: zstd datagen + file $(ZSTD) + ./cli-tests/run.py --exec-prefix="$(QEMU_SYS)" --zstd="$(ZSTD)" --datagen=./datagen + +.PHONY: test-fullbench test-fullbench: fullbench datagen $(QEMU_SYS) ./fullbench -i1 $(QEMU_SYS) ./fullbench -i1 -P0 +.PHONY: test-fullbench32 test-fullbench32: fullbench32 datagen $(QEMU_SYS) ./fullbench32 -i1 $(QEMU_SYS) ./fullbench32 -i1 -P0 +.PHONY: test-fuzzer test-fuzzer: fuzzer $(QEMU_SYS) ./fuzzer -v $(FUZZERTEST) $(FUZZER_FLAGS) +# Note : this test presumes `fuzzer` will be built +.PHONY: test-fuzzer-stackmode test-fuzzer-stackmode: MOREFLAGS += -DZSTD_HEAPMODE=0 test-fuzzer-stackmode: test-fuzzer +.PHONY: test-fuzzer32 test-fuzzer32: fuzzer32 $(QEMU_SYS) ./fuzzer32 -v $(FUZZERTEST) $(FUZZER_FLAGS) +.PHONY: test-zstream test-zstream: zstreamtest $(QEMU_SYS) ./zstreamtest -v $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS) $(QEMU_SYS) ./zstreamtest --newapi -t1 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS) test-zstream32: zstreamtest32 - $(QEMU_SYS) ./zstreamtest32 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS) + $(QEMU_SYS) ./zstreamtest32 -v $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS) test-longmatch: longmatch $(QEMU_SYS) ./longmatch diff --git a/tests/README.md b/tests/README.md index c6ffb4095a7..2cf0e76c20e 100644 --- a/tests/README.md +++ b/tests/README.md @@ -45,7 +45,7 @@ optional arguments: -h, --help show this help message and exit --directory DIRECTORY directory with files to benchmark - --levels LEVELS levels to test eg ('1,2,3') + --levels LEVELS levels to test e.g. ('1,2,3') --iterations ITERATIONS number of benchmark iterations to run --emails EMAILS email addresses of people who will be alerted upon @@ -70,7 +70,7 @@ After `sleepTime` (an optional parameter, default 300 seconds) seconds the scrip If a new commit is found it is compiled and a speed benchmark for this commit is performed. The results of the speed benchmark are compared to the previous results. If compression or decompression speed for one of zstd levels is lower than `lowerLimit` (an optional parameter, default 0.98) the speed benchmark is restarted. 
-If second results are also lower than `lowerLimit` the warning e-mail is send to recipients from the list (the `emails` parameter). +If second results are also lower than `lowerLimit` the warning e-mail is sent to recipients from the list (the `emails` parameter). Additional remarks: - To be sure that speed results are accurate the script should be run on a "stable" target system with no other jobs running in parallel @@ -168,7 +168,7 @@ Full list of arguments can use all --zstd parameter names and 'cParams' as a shorthand for all parameters used in ZSTD_compressionParameters (Default: display all params available) -P# : generated sample compressibility (when no file is provided) - -t# : Caps runtime of operation in seconds (default : 99999 seconds (about 27 hours )) + -t# : Caps runtime of operation in seconds (default: 99999 seconds (about 27 hours)) -v : Prints Benchmarking output -D : Next argument dictionary file -s : Benchmark all files separately diff --git a/tests/automated_benchmarking.py b/tests/automated_benchmarking.py index e0c03ec2d4a..153e7db4c84 100644 --- a/tests/automated_benchmarking.py +++ b/tests/automated_benchmarking.py @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -291,7 +291,7 @@ def main(filenames, levels, iterations, builds=None, emails=None, continuous=Fal parser = argparse.ArgumentParser() parser.add_argument("--directory", help="directory with files to benchmark", default="golden-compression") - parser.add_argument("--levels", help="levels to test eg ('1,2,3')", default="1") + parser.add_argument("--levels", help="levels to test e.g. ('1,2,3')", default="1") parser.add_argument("--iterations", help="number of benchmark iterations to run", default="1") parser.add_argument("--emails", help="email addresses of people who will be alerted upon regression. Only for continuous mode", default=None) parser.add_argument("--frequency", help="specifies the number of seconds to wait before each successive check for new PRs in continuous mode", default=DEFAULT_MAX_API_CALL_FREQUENCY_SEC) diff --git a/tests/bigdict.c b/tests/bigdict.c index fb089258690..ff2bb2d7032 100644 --- a/tests/bigdict.c +++ b/tests/bigdict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/checkTag.c b/tests/checkTag.c index f6c5e9766ee..26871ed0fd8 100644 --- a/tests/checkTag.c +++ b/tests/checkTag.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/check_size.py b/tests/check_size.py new file mode 100755 index 00000000000..028b0a9c07c --- /dev/null +++ b/tests/check_size.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). 
+# You may select, at your option, one of the above-listed licenses. +# ################################################################ + +import os +import subprocess +import sys + +if len(sys.argv) != 3: + print(f"Usage: {sys.argv[0]} FILE SIZE_LIMIT") + sys.exit(1) + +file = sys.argv[1] +limit = int(sys.argv[2]) + +if not os.path.exists(file): + print(f"{file} does not exist") + sys.exit(1) + +size = os.path.getsize(file) + +if size > limit: + print(f"file {file} is {size} bytes, which is greater than the limit of {limit} bytes") + sys.exit(1) diff --git a/tests/cli-tests/.gitignore b/tests/cli-tests/.gitignore new file mode 100644 index 00000000000..0ad01b24eff --- /dev/null +++ b/tests/cli-tests/.gitignore @@ -0,0 +1,6 @@ +!bin/ +!datagen +!zstdcat + +scratch/ +bin/symlinks diff --git a/tests/cli-tests/README.md b/tests/cli-tests/README.md new file mode 100644 index 00000000000..7ca07c3e9ac --- /dev/null +++ b/tests/cli-tests/README.md @@ -0,0 +1,258 @@ +# CLI tests + +The CLI tests are focused on testing the zstd CLI. +They are intended to be simple tests verifying that the CLI and its arguments work as advertised. +They are not intended to test the library, only the code in `programs/`. +The library will get incidental coverage, but if you find yourself trying to trigger a specific condition in the library, this is the wrong tool. + +## Test runner usage + +The test runner `run.py` will run tests against the in-tree build of `zstd` and `datagen` by default, which means that `zstd` and `datagen` must be built first. + +The `zstd` binary used can be passed with `--zstd /path/to/zstd`. +Additionally, to run `zstd` through a tool like `valgrind` or `qemu`, set the `--exec-prefix 'valgrind -q'` flag. + +Similarly, the `--datagen` and `--zstdgrep` flags can be set to specify +the paths to their respective binaries. However, these tools do not use +the `EXEC_PREFIX`. + +Each test executes in its own scratch directory under `scratch/test/name`, e.g. `scratch/basic/help.sh/`. Normally these directories are removed after the test executes. However, the `--preserve` flag will preserve these directories after execution, and save the test's exit code, stdout, and stderr in the scratch directory to `exit`, `stdout`, and `stderr` respectively. This can be useful for debugging/editing a test and updating the expected output. + +### Running all the tests + +By default, the test runner `run.py` will run all the tests and report the results. + +Examples: + +``` +./run.py +./run.py --preserve +./run.py --zstd ../../build/programs/zstd --datagen ../../build/tests/datagen +``` + +### Running specific tests + +A set of test names can be passed to the test runner `run.py` to only execute those tests. +This can be useful for writing or debugging a test, especially with `--preserve`. + +The test name can either be the path to the test file, or the test's name, which is its path relative to the test directory. + +Examples: + +``` +./run.py basic/help.sh +./run.py --preserve basic/help.sh basic/version.sh +./run.py --preserve --verbose basic/help.sh +``` + +### Updating exact output + +If a test is failing because a `.stderr.exact` or `.stdout.exact` no longer matches, you can re-run the tests with `--set-exact-output` and the correct output will be written. + +Example: +``` +./run.py --set-exact-output +./run.py basic/help.sh --set-exact-output +``` + +## Writing a test + +Test cases are arbitrary executables, and can be written in any language, but are generally shell scripts.
+After the script executes, the exit code, stderr, and stdout are compared against the expectations. + +Each test is run in a clean directory that the test can use for intermediate files. This directory will be cleaned up at the end of the test, unless `--preserve` is passed to the test runner. Additionally, the `setup` script can prepare the directory before the test runs. + +### Calling zstd, utilities, and environment variables + +The `$PATH` for tests is prepended with the `bin/` sub-directory, which contains helper scripts for ease of testing. +The `zstd` binary will call the zstd binary specified by `run.py` with the correct `$EXEC_PREFIX`. +Similarly, `datagen`, `unzstd`, `zstdgrep`, `zstdcat`, etc., are provided. + +Helper utilities like `cmp_size`, `println`, and `die` are provided here too. See their scripts for details. + +Common shell script libraries are provided under `common/`, with helper variables and functions. They can be sourced with `source "$COMMON/library.sh"`. + +Lastly, environment variables are provided for testing, which can be listed when calling `run.py` with `--verbose`. +They are generally used by the helper scripts in `bin/` to coordinate everything. + +### Basic test case + +When executing your `$TEST` executable, by default the exit code is expected to be `0`. However, you can provide an alternate expected exit code in a `$TEST.exit` file. + +When executing your `$TEST` executable, by default the expected stderr and stdout are empty. However, you can override the default by providing one of three files: + +* `$TEST.{stdout,stderr}.exact` +* `$TEST.{stdout,stderr}.glob` +* `$TEST.{stdout,stderr}.ignore` + +If you provide a `.exact` file, the output is expected to exactly match, byte-for-byte. + +If you provide a `.glob` file, the output is expected to match the expected file, where each line is interpreted as a glob pattern. Additionally, a line containing only `...` matches all lines until the next expected line matches. + +If you provide a `.ignore` file, the output is ignored. + +#### Passing examples + +All these examples pass. + +Exit 1, and change the expectation to be 1. + +``` +exit-1.sh +--- +#!/bin/sh +exit 1 +--- + +exit-1.sh.exit +--- +1 +--- +``` + +Check that the stdout output matches exactly. + +``` +echo.sh +--- +#!/bin/sh +echo "hello world" +--- + +echo.sh.stdout.exact +--- +hello world +--- +``` + +Check the stderr output using a glob. + +``` +random.sh +--- +#!/bin/sh +head -c 10 < /dev/urandom | xxd >&2 +--- + +random.sh.stderr.glob +--- +00000000: * * * * * * +--- +``` + +Multiple lines can be matched with `...`. + +``` +random-num-lines.sh +--- +#!/bin/sh +echo hello +seq 0 $RANDOM +echo world +--- + +random-num-lines.sh.stdout.glob +--- +hello +0 +... +world +--- +``` + +#### Failing examples + +Exit code is expected to be 0, but is 1. + +``` +exit-1.sh +--- +#!/bin/sh +exit 1 +--- +``` + +Stdout is expected to be empty, but isn't. + +``` +echo.sh +--- +#!/bin/sh +echo hello world +--- +``` + +Stderr is expected to be `hello` but is `world`. + +``` +hello.sh +--- +#!/bin/sh +echo world >&2 +--- + +hello.sh.stderr.exact +--- +hello +--- +``` + +### Setup & teardown scripts + +Finally, test writing can be eased with setup and teardown scripts. +Each directory in the test directory is a test-suite consisting of all tests within that directory (but not sub-directories).
+This test suite can come with 4 scripts to help test writing: + +* `setup_once` +* `teardown_once` +* `setup` +* `teardown` + +The `setup_once` and `teardown_once` are run once before and after all the tests in the suite respectively. +They operate in the scratch directory for the test suite, which is the parent directory of each scratch directory for each test case. +They can do work that is shared between tests to improve test efficiency. +For example, the `dictionaries/setup_once` script builds several dictionaries, for use in the `dictionaries` tests. + +The `setup` and `teardown` scripts run before and after each test case respectively, in the test case's scratch directory. +These scripts can do work that is shared between test cases to make tests more succinct. +For example, the `dictionaries/setup` script copies the dictionaries built by the `dictionaries/setup_once` script into the test's scratch directory, to make them easier to use, and make sure they aren't accidentally modified. + +#### Examples + +``` +basic/setup +--- +#!/bin/sh +# Create some files for testing with +datagen > file +datagen > file0 +datagen > file1 +--- + +basic/test.sh +--- +#!/bin/sh +zstd file file0 file1 +--- + +dictionaries/setup_once +--- +#!/bin/sh +set -e + +mkdir files/ dicts/ +for i in $(seq 10); do + datagen -g1000 > files/$i +done + +zstd --train -r files/ -o dicts/0 +--- + +dictionaries/setup +--- +#!/bin/sh + +# Runs in the test case's scratch directory. +# The test suite's scratch directory that +# `setup_once` operates in is the parent directory. +cp -r ../files ../dicts . +--- +``` diff --git a/tests/cli-tests/basic/help.sh b/tests/cli-tests/basic/help.sh new file mode 100755 index 00000000000..927c3ffbcc5 --- /dev/null +++ b/tests/cli-tests/basic/help.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e + +println "+ zstd -h" +zstd -h +println "+ zstd -H" +zstd -H +println "+ zstd --help" +zstd --help diff --git a/tests/cli-tests/basic/help.sh.stdout.glob b/tests/cli-tests/basic/help.sh.stdout.glob new file mode 100644 index 00000000000..5580dc66919 --- /dev/null +++ b/tests/cli-tests/basic/help.sh.stdout.glob @@ -0,0 +1,34 @@ ++ zstd -h +Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided. + +Usage: zstd *OPTIONS...* *INPUT... | -* *-o OUTPUT* + +Options: + -o OUTPUT Write output to a single file, OUTPUT. + -k, --keep Preserve INPUT file(s). *Default* + --rm Remove INPUT file(s) after successful (de)compression. + + -# Desired compression level, where `#` is a number between 1 and 19; + lower numbers provide faster compression, higher numbers yield + better compression ratios. *Default: 3* + + -d, --decompress Perform decompression. + -D DICT Use DICT as the dictionary for compression or decompression. + + -f, --force Disable input and output checks. Allows overwriting existing files, + receiving input from the console, printing ouput to STDOUT, and + operating on links, block devices, etc. Unrecognized formats will be + passed-through through as-is. + + -h Display short usage and exit. + -H, --help Display full help and exit. + -V, --version Display the program version and exit. + ++ zstd -H +... +Advanced options: +... ++ zstd --help +... +Advanced options: +... 
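The `help.sh` test above is checked against a `.stdout.glob` expectation. As a usage sketch that only combines the `run.py` flags and `scratch/` layout described in the CLI-tests README above (it assumes the commands are run from the `tests/cli-tests/` directory), a failing expectation for a test such as `basic/help.sh` can be investigated like this:

```
# Run only the help test and keep its scratch directory around.
./run.py --preserve basic/help.sh

# --preserve saves the test's exit code and captured output in its scratch directory.
cat scratch/basic/help.sh/exit
cat scratch/basic/help.sh/stdout

# Once the new output has been verified by hand, .exact expectations
# can be regenerated for this test only; .glob files such as
# help.sh.stdout.glob are edited by hand instead.
./run.py basic/help.sh --set-exact-output
```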
diff --git a/tests/cli-tests/basic/memlimit.sh b/tests/cli-tests/basic/memlimit.sh new file mode 100755 index 00000000000..88d734d0d9d --- /dev/null +++ b/tests/cli-tests/basic/memlimit.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +echo "some data" > file + +println "+ zstd --memory=32LB file" +zstd --memory=32LB file && die "Should not allow bogus suffix" +println "+ zstd --memory=32LiB file" +zstd --memory=32LiB file && die "Should not allow bogus suffix" +println "+ zstd --memory=32A file" +zstd --memory=32A file && die "Should not allow bogus suffix" +println "+ zstd --memory=32r82347dn83 file" +zstd --memory=32r82347dn83 file && die "Should not allow bogus suffix" +println "+ zstd --memory=32asbdf file" +zstd --memory=32asbdf file && die "Should not allow bogus suffix" +println "+ zstd --memory=hello file" +zstd --memory=hello file && die "Should not allow non-numeric parameter" +println "+ zstd --memory=1 file" +zstd -q --memory=1 file && die "Should allow numeric parameter without suffix" +rm file.zst +println "+ zstd --memory=1K file" +zstd -q --memory=1K file && die "Should allow numeric parameter with expected suffix" +rm file.zst +println "+ zstd --memory=1KB file" +zstd -q --memory=1KB file && die "Should allow numeric parameter with expected suffix" +rm file.zst +println "+ zstd --memory=1KiB file" +zstd -q --memory=1KiB file && die "Should allow numeric parameter with expected suffix" +rm file.zst +println "+ zstd --memory=1M file" +zstd -q --memory=1M file && die "Should allow numeric parameter with expected suffix" +rm file.zst +println "+ zstd --memory=1MB file" +zstd -q --memory=1MB file && die "Should allow numeric parameter with expected suffix" +rm file.zst +println "+ zstd --memory=1MiB file" +zstd -q --memory=1MiB file && die "Should allow numeric parameter with expected suffix" +rm file.zst + +rm file +exit 0 diff --git a/tests/cli-tests/basic/memlimit.sh.stderr.exact b/tests/cli-tests/basic/memlimit.sh.stderr.exact new file mode 100644 index 00000000000..3785b0f92f3 --- /dev/null +++ b/tests/cli-tests/basic/memlimit.sh.stderr.exact @@ -0,0 +1,13 @@ +error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed +error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed +error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed +error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed +error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed +error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed +Should allow numeric parameter without suffix +Should allow numeric parameter with expected suffix +Should allow numeric parameter with expected suffix +Should allow numeric parameter with expected suffix +Should allow numeric parameter with expected suffix +Should allow numeric parameter with expected suffix +Should allow numeric parameter with expected suffix diff --git a/tests/cli-tests/basic/memlimit.sh.stdout.exact b/tests/cli-tests/basic/memlimit.sh.stdout.exact new file mode 100644 index 00000000000..1821648e406 --- /dev/null +++ b/tests/cli-tests/basic/memlimit.sh.stdout.exact @@ -0,0 +1,13 @@ ++ zstd --memory=32LB file ++ zstd --memory=32LiB file ++ zstd --memory=32A file ++ zstd --memory=32r82347dn83 file ++ zstd --memory=32asbdf file ++ zstd --memory=hello file ++ zstd --memory=1 file ++ zstd --memory=1K file ++ zstd --memory=1KB file ++ zstd --memory=1KiB file ++ zstd --memory=1M file ++ zstd --memory=1MB file ++ 
zstd --memory=1MiB file diff --git a/tests/cli-tests/basic/output_dir.sh b/tests/cli-tests/basic/output_dir.sh new file mode 100755 index 00000000000..a8819d29260 --- /dev/null +++ b/tests/cli-tests/basic/output_dir.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +println "+ zstd -r * --output-dir-mirror=\"\"" +zstd -r * --output-dir-mirror="" && die "Should not allow empty output dir!" +println "+ zstd -r * --output-dir-flat=\"\"" +zstd -r * --output-dir-flat="" && die "Should not allow empty output dir!" +exit 0 diff --git a/tests/cli-tests/basic/output_dir.sh.stderr.exact b/tests/cli-tests/basic/output_dir.sh.stderr.exact new file mode 100644 index 00000000000..e12b50427cf --- /dev/null +++ b/tests/cli-tests/basic/output_dir.sh.stderr.exact @@ -0,0 +1,2 @@ +error: output dir cannot be empty string (did you mean to pass '.' instead?) +error: output dir cannot be empty string (did you mean to pass '.' instead?) diff --git a/tests/cli-tests/basic/output_dir.sh.stdout.exact b/tests/cli-tests/basic/output_dir.sh.stdout.exact new file mode 100644 index 00000000000..1e478cd7531 --- /dev/null +++ b/tests/cli-tests/basic/output_dir.sh.stdout.exact @@ -0,0 +1,2 @@ ++ zstd -r * --output-dir-mirror="" ++ zstd -r * --output-dir-flat="" diff --git a/tests/cli-tests/basic/version.sh b/tests/cli-tests/basic/version.sh new file mode 100755 index 00000000000..f75eaa84fb4 --- /dev/null +++ b/tests/cli-tests/basic/version.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +set -e + +zstd -V +zstd --version diff --git a/tests/cli-tests/basic/version.sh.stdout.glob b/tests/cli-tests/basic/version.sh.stdout.glob new file mode 100644 index 00000000000..4cc9fb9b234 --- /dev/null +++ b/tests/cli-tests/basic/version.sh.stdout.glob @@ -0,0 +1,2 @@ +*** Zstandard CLI (*-bit) v1.*.*, by Yann Collet *** +*** Zstandard CLI (*-bit) v1.*.*, by Yann Collet *** diff --git a/tests/cli-tests/bin/cmp_size b/tests/cli-tests/bin/cmp_size new file mode 100755 index 00000000000..8e4bef88eab --- /dev/null +++ b/tests/cli-tests/bin/cmp_size @@ -0,0 +1,44 @@ +#!/bin/sh + +set -e + +usage() +{ + printf "USAGE:\n\t$0 [-eq|-ne|-lt|-le|-gt|-ge] FILE1 FILE2\n" +} + +help() +{ + printf "Small utility to compare file sizes without printing them with set -x.\n\n" + usage +} + +case "$1" in + -h) help; exit 0 ;; + --help) help; exit 0 ;; +esac + +if ! test -f $2; then + printf "FILE1='%b' is not a file\n\n" "$2" + usage + exit 1 +fi + +if ! 
test -f $3; then + printf "FILE2='%b' is not a file\n\n" "$3" + usage + exit 1 +fi + + +size1=$(wc -c < $2) +size2=$(wc -c < $3) + +case "$1" in + -eq) [ "$size1" -eq "$size2" ] ;; + -ne) [ "$size1" -ne "$size2" ] ;; + -lt) [ "$size1" -lt "$size2" ] ;; + -le) [ "$size1" -le "$size2" ] ;; + -gt) [ "$size1" -gt "$size2" ] ;; + -ge) [ "$size1" -ge "$size2" ] ;; +esac diff --git a/tests/cli-tests/bin/datagen b/tests/cli-tests/bin/datagen new file mode 100755 index 00000000000..8c60cbcc9e7 --- /dev/null +++ b/tests/cli-tests/bin/datagen @@ -0,0 +1,3 @@ +#!/bin/sh + +"$DATAGEN_BIN" $@ diff --git a/tests/cli-tests/bin/die b/tests/cli-tests/bin/die new file mode 100755 index 00000000000..8633bc975b0 --- /dev/null +++ b/tests/cli-tests/bin/die @@ -0,0 +1,4 @@ +#!/bin/sh + +println "${*}" 1>&2 +exit 1 diff --git a/tests/cli-tests/bin/println b/tests/cli-tests/bin/println new file mode 100755 index 00000000000..494eb18c850 --- /dev/null +++ b/tests/cli-tests/bin/println @@ -0,0 +1,2 @@ +#!/bin/sh +printf '%b\n' "${*}" diff --git a/tests/cli-tests/bin/unzstd b/tests/cli-tests/bin/unzstd new file mode 120000 index 00000000000..613f917fd7c --- /dev/null +++ b/tests/cli-tests/bin/unzstd @@ -0,0 +1 @@ +zstd \ No newline at end of file diff --git a/tests/cli-tests/bin/zstd b/tests/cli-tests/bin/zstd new file mode 100755 index 00000000000..7a40aec90d4 --- /dev/null +++ b/tests/cli-tests/bin/zstd @@ -0,0 +1,9 @@ +#!/bin/sh + +zstdname=$(basename $0) + +if [ -z "$EXEC_PREFIX" ]; then + "$ZSTD_SYMLINK_DIR/$zstdname" $@ +else + $EXEC_PREFIX "$ZSTD_SYMLINK_DIR/$zstdname" $@ +fi diff --git a/tests/cli-tests/bin/zstdcat b/tests/cli-tests/bin/zstdcat new file mode 120000 index 00000000000..613f917fd7c --- /dev/null +++ b/tests/cli-tests/bin/zstdcat @@ -0,0 +1 @@ +zstd \ No newline at end of file diff --git a/tests/cli-tests/bin/zstdgrep b/tests/cli-tests/bin/zstdgrep new file mode 100755 index 00000000000..8821ebb5be5 --- /dev/null +++ b/tests/cli-tests/bin/zstdgrep @@ -0,0 +1,2 @@ +#!/bin/sh +"$ZSTDGREP_BIN" $@ diff --git a/tests/cli-tests/bin/zstdless b/tests/cli-tests/bin/zstdless new file mode 100755 index 00000000000..d1d6f82d20a --- /dev/null +++ b/tests/cli-tests/bin/zstdless @@ -0,0 +1,2 @@ +#!/bin/sh +"$ZSTDLESS_BIN" $@ diff --git a/tests/cli-tests/cltools/setup b/tests/cli-tests/cltools/setup new file mode 100755 index 00000000000..3009bd5be5b --- /dev/null +++ b/tests/cli-tests/cltools/setup @@ -0,0 +1,6 @@ +#!/bin/sh + +set -e + +echo "1234" > file +zstd file diff --git a/tests/cli-tests/cltools/zstdgrep.sh b/tests/cli-tests/cltools/zstdgrep.sh new file mode 100755 index 00000000000..6cd68b7ab27 --- /dev/null +++ b/tests/cli-tests/cltools/zstdgrep.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +set -e + +println "+ good path" +zstdgrep "1234" file file.zst +println "+ bad path" +zstdgrep "1234" bad.zst diff --git a/tests/cli-tests/cltools/zstdgrep.sh.exit b/tests/cli-tests/cltools/zstdgrep.sh.exit new file mode 100644 index 00000000000..56a6051ca2b --- /dev/null +++ b/tests/cli-tests/cltools/zstdgrep.sh.exit @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/tests/cli-tests/cltools/zstdgrep.sh.stderr.exact b/tests/cli-tests/cltools/zstdgrep.sh.stderr.exact new file mode 100644 index 00000000000..f147f28d7ea --- /dev/null +++ b/tests/cli-tests/cltools/zstdgrep.sh.stderr.exact @@ -0,0 +1 @@ +zstd: can't stat bad.zst : No such file or directory -- ignored diff --git a/tests/cli-tests/cltools/zstdgrep.sh.stdout.glob b/tests/cli-tests/cltools/zstdgrep.sh.stdout.glob new file mode 100644 index 
00000000000..96d4fa2cbcc --- /dev/null +++ b/tests/cli-tests/cltools/zstdgrep.sh.stdout.glob @@ -0,0 +1,4 @@ ++ good path +file:1234 +file.zst:1234 ++ bad path diff --git a/tests/cli-tests/cltools/zstdless.sh b/tests/cli-tests/cltools/zstdless.sh new file mode 100755 index 00000000000..a0697bde644 --- /dev/null +++ b/tests/cli-tests/cltools/zstdless.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e + +println "+ good path" +zstdless file.zst +println "+ pass parameters" +zstdless -N file.zst # This parameter does not produce line #s when piped, but still serves to test that the flag went to less and not zstd +println "+ bad path" +zstdless bad.zst >&2 diff --git a/tests/cli-tests/cltools/zstdless.sh.stderr.exact b/tests/cli-tests/cltools/zstdless.sh.stderr.exact new file mode 100644 index 00000000000..5a726f1d149 --- /dev/null +++ b/tests/cli-tests/cltools/zstdless.sh.stderr.exact @@ -0,0 +1,2 @@ +zstd: can't stat bad.zst : No such file or directory -- ignored +bad.zst: No such file or directory diff --git a/tests/cli-tests/cltools/zstdless.sh.stdout.glob b/tests/cli-tests/cltools/zstdless.sh.stdout.glob new file mode 100644 index 00000000000..2784dddf17c --- /dev/null +++ b/tests/cli-tests/cltools/zstdless.sh.stdout.glob @@ -0,0 +1,5 @@ ++ good path +1234 ++ pass parameters +1234 ++ bad path diff --git a/tests/cli-tests/common/format.sh b/tests/cli-tests/common/format.sh new file mode 100644 index 00000000000..e574e973075 --- /dev/null +++ b/tests/cli-tests/common/format.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +. "$COMMON/platform.sh" + +zstd_supports_format() +{ + zstd -h | grep > $INTOVOID -- "--format=$1" +} + +format_extension() +{ + if [ "$1" = "zstd" ]; then + printf "zst" + elif [ "$1" = "gzip" ]; then + printf "gz" + else + printf "$1" + fi +} diff --git a/tests/cli-tests/common/mtime.sh b/tests/cli-tests/common/mtime.sh new file mode 100644 index 00000000000..344074d398c --- /dev/null +++ b/tests/cli-tests/common/mtime.sh @@ -0,0 +1,13 @@ +. "$COMMON/platform.sh" + +MTIME="stat -c %Y" +case "$UNAME" in + Darwin | FreeBSD | OpenBSD | NetBSD) MTIME="stat -f %m" ;; +esac + +assertSameMTime() { + MT1=$($MTIME "$1") + MT2=$($MTIME "$2") + echo MTIME $MT1 $MT2 + [ "$MT1" = "$MT2" ] || die "mtime on $1 doesn't match mtime on $2 ($MT1 != $MT2)" +} diff --git a/tests/cli-tests/common/permissions.sh b/tests/cli-tests/common/permissions.sh new file mode 100644 index 00000000000..6bce1f0b387 --- /dev/null +++ b/tests/cli-tests/common/permissions.sh @@ -0,0 +1,18 @@ +. 
"$COMMON/platform.sh" + +GET_PERMS="stat -c %a" +case "$UNAME" in + Darwin | FreeBSD | OpenBSD | NetBSD) GET_PERMS="stat -f %Lp" ;; +esac + +assertFilePermissions() { + STAT1=$($GET_PERMS "$1") + STAT2=$2 + [ "$STAT1" = "$STAT2" ] || die "permissions on $1 don't match expected ($STAT1 != $STAT2)" +} + +assertSamePermissions() { + STAT1=$($GET_PERMS "$1") + STAT2=$($GET_PERMS "$2") + [ "$STAT1" = "$STAT2" ] || die "permissions on $1 don't match those on $2 ($STAT1 != $STAT2)" +} diff --git a/tests/cli-tests/common/platform.sh b/tests/cli-tests/common/platform.sh new file mode 100644 index 00000000000..6eb45eab99e --- /dev/null +++ b/tests/cli-tests/common/platform.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +UNAME=$(uname) + +isWindows=false +INTOVOID="/dev/null" +case "$UNAME" in + GNU) DEVDEVICE="/dev/random" ;; + *) DEVDEVICE="/dev/zero" ;; +esac +case "$OS" in + Windows*) + isWindows=true + INTOVOID="NUL" + DEVDEVICE="NUL" + ;; +esac + +case "$UNAME" in + Darwin) MD5SUM="md5 -r" ;; + FreeBSD) MD5SUM="gmd5sum" ;; + NetBSD) MD5SUM="md5 -n" ;; + OpenBSD) MD5SUM="md5" ;; + *) MD5SUM="md5sum" ;; +esac + +DIFF="diff" +case "$UNAME" in + SunOS) DIFF="gdiff" ;; +esac + +if echo hello | zstd -v -T2 2>&1 > $INTOVOID | grep -q 'multi-threading is disabled' +then + hasMT="" +else + hasMT="true" +fi diff --git a/tests/cli-tests/compression/adapt.sh b/tests/cli-tests/compression/adapt.sh new file mode 100755 index 00000000000..30b9afaa03b --- /dev/null +++ b/tests/cli-tests/compression/adapt.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +set -e + +# Test --adapt +zstd -f file --adapt -c | zstd -t + +datagen -g100M > file100M + +# Pick parameters to force fast adaptation, even on slow systems +zstd --adapt -vvvv -19 --zstd=wlog=10 file100M -o /dev/null 2>&1 | grep -q "faster speed , lighter compression" + +# Adaption still happens with --no-progress +zstd --no-progress --adapt -vvvv -19 --zstd=wlog=10 file100M -o /dev/null 2>&1 | grep -q "faster speed , lighter compression" diff --git a/tests/cli-tests/compression/basic.sh b/tests/cli-tests/compression/basic.sh new file mode 100755 index 00000000000..b6e2aa0456a --- /dev/null +++ b/tests/cli-tests/compression/basic.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +set -e + +# Uncomment the set -v line for debugging +# set -v + +# Test compression flags and check that they work +zstd file ; zstd -t file.zst +zstd -f file ; zstd -t file.zst +zstd -f -z file ; zstd -t file.zst +zstd -f -k file ; zstd -t file.zst +zstd -f -C file ; zstd -t file.zst +zstd -f --check file ; zstd -t file.zst +zstd -f --no-check file ; zstd -t file.zst +zstd -f -- file ; zstd -t file.zst + +# Test output file compression +zstd -o file-out.zst ; zstd -t file-out.zst +zstd -fo file-out.zst; zstd -t file-out.zst + +# Test compression to stdout +zstd -c file | zstd -t +zstd --stdout file | zstd -t +println bob | zstd | zstd -t + +# Test keeping input file when compressing to stdout in gzip mode +$ZSTD_SYMLINK_DIR/gzip -c file | zstd -t ; test -f file +$ZSTD_SYMLINK_DIR/gzip --stdout file | zstd -t ; test -f file + +# Test --rm +cp file file-rm +zstd --rm file-rm; zstd -t file-rm.zst +test ! 
-f file-rm diff --git a/tests/cli-tests/compression/compress-literals.sh b/tests/cli-tests/compression/compress-literals.sh new file mode 100755 index 00000000000..573481a3f5b --- /dev/null +++ b/tests/cli-tests/compression/compress-literals.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e + +# Test --[no-]compress-literals +zstd file --no-compress-literals -1 -c | zstd -t +zstd file --no-compress-literals -19 -c | zstd -t +zstd file --no-compress-literals --fast=1 -c | zstd -t +zstd file --compress-literals -1 -c | zstd -t +zstd file --compress-literals --fast=1 -c | zstd -t diff --git a/tests/cli-tests/compression/format.sh b/tests/cli-tests/compression/format.sh new file mode 100755 index 00000000000..192fa2cf29f --- /dev/null +++ b/tests/cli-tests/compression/format.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +. "$COMMON/format.sh" + +set -e + +# Test --format +zstd --format=zstd file -f +zstd -t file.zst +for format in "gzip" "lz4" "xz" "lzma"; do + if zstd_supports_format $format; then + zstd --format=$format file + zstd -t file.$(format_extension $format) + zstd -c --format=$format file | zstd -t --format=$format + fi +done diff --git a/tests/cli-tests/compression/golden.sh b/tests/cli-tests/compression/golden.sh new file mode 100755 index 00000000000..85dd3fdb8af --- /dev/null +++ b/tests/cli-tests/compression/golden.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +set -e + +GOLDEN_DIR="$ZSTD_REPO_DIR/tests/golden-compression/" +cp -r "$GOLDEN_DIR" golden/ + +zstd -rf golden/ --output-dir-mirror golden-compressed/ +zstd -r -t golden-compressed/ + +zstd --target-compressed-block-size=1024 -rf golden/ --output-dir-mirror golden-compressed/ +zstd -r -t golden-compressed/ diff --git a/tests/cli-tests/compression/gzip-compat.sh b/tests/cli-tests/compression/gzip-compat.sh new file mode 100755 index 00000000000..bb72e05fcc0 --- /dev/null +++ b/tests/cli-tests/compression/gzip-compat.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +# Uncomment the set -v line for debugging +# set -v + +# Test gzip specific compression option +$ZSTD_SYMLINK_DIR/gzip --fast file ; $ZSTD_SYMLINK_DIR/gzip -d file.gz +$ZSTD_SYMLINK_DIR/gzip --best file ; $ZSTD_SYMLINK_DIR/gzip -d file.gz + +# Test -n / --no-name: do not embed original filename in archive +$ZSTD_SYMLINK_DIR/gzip -n file ; grep -qv file file.gz ; $ZSTD_SYMLINK_DIR/gzip -d file.gz +$ZSTD_SYMLINK_DIR/gzip --no-name file ; grep -qv file file.gz ; $ZSTD_SYMLINK_DIR/gzip -d file.gz +$ZSTD_SYMLINK_DIR/gzip -c --no-name file | grep -qv file diff --git a/tests/cli-tests/compression/levels.sh b/tests/cli-tests/compression/levels.sh new file mode 100755 index 00000000000..cc2700a3097 --- /dev/null +++ b/tests/cli-tests/compression/levels.sh @@ -0,0 +1,62 @@ +#!/bin/sh + +set -e +set -v + +datagen > file + +# Compress with various levels and ensure that their sizes are ordered +zstd --fast=10 file -o file-f10.zst -q +zstd --fast=1 file -o file-f1.zst -q +zstd -1 file -o file-1.zst -q +zstd -19 file -o file-19.zst -q + +zstd -t file-f10.zst file-f1.zst file-1.zst file-19.zst + +cmp_size -lt file-19.zst file-1.zst +cmp_size -lt file-1.zst file-f1.zst +cmp_size -lt file-f1.zst file-f10.zst + +# Test default levels +zstd --fast file -f -q +cmp file.zst file-f1.zst || die "--fast is not level -1" + +zstd -0 file -o file-0.zst -q +zstd -f file -q +cmp file.zst file-0.zst || die "Level 0 is not the default level" + +# Test level clamping +zstd -99 file -o file-99.zst -q +cmp file-19.zst file-99.zst || die "Level 99 is clamped to 19" +zstd --fast=200000 file -c | zstd -t + +zstd -5000000000 -f file && 
die "Level too large, must fail" ||: +zstd --fast=5000000000 -f file && die "Level too large, must fail" ||: + +# Test setting a level through the environment variable +ZSTD_CLEVEL=-10 zstd file -o file-f10-env.zst -q +ZSTD_CLEVEL=1 zstd file -o file-1-env.zst -q +ZSTD_CLEVEL=+19 zstd file -o file-19-env.zst -q +ZSTD_CLEVEL=+99 zstd file -o file-99-env.zst -q + +cmp file-f10.zst file-f10-env.zst || die "Environment variable failed to set level" +cmp file-1.zst file-1-env.zst || die "Environment variable failed to set level" +cmp file-19.zst file-19-env.zst || die "Environment variable failed to set level" +cmp file-99.zst file-99-env.zst || die "Environment variable failed to set level" + +# Test invalid environment clevel is the default level +zstd -f file -q +ZSTD_CLEVEL=- zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +ZSTD_CLEVEL=+ zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +ZSTD_CLEVEL=a zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +ZSTD_CLEVEL=-a zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +ZSTD_CLEVEL=+a zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +ZSTD_CLEVEL=3a7 zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +ZSTD_CLEVEL=5000000000 zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst + +# Test environment clevel is overridden by command line +ZSTD_CLEVEL=10 zstd -f file -1 -o file-1-env.zst -q +ZSTD_CLEVEL=10 zstd -f file --fast=1 -o file-f1-env.zst -q + +cmp file-1.zst file-1-env.zst || die "Environment variable not overridden" +cmp file-f1.zst file-f1-env.zst || die "Environment variable not overridden" diff --git a/tests/cli-tests/compression/levels.sh.stderr.exact b/tests/cli-tests/compression/levels.sh.stderr.exact new file mode 100644 index 00000000000..c8fb79c6896 --- /dev/null +++ b/tests/cli-tests/compression/levels.sh.stderr.exact @@ -0,0 +1,69 @@ + +datagen > file + +# Compress with various levels and ensure that their sizes are ordered +zstd --fast=10 file -o file-f10.zst -q +zstd --fast=1 file -o file-f1.zst -q +zstd -1 file -o file-1.zst -q +zstd -19 file -o file-19.zst -q + +zstd -t file-f10.zst file-f1.zst file-1.zst file-19.zst +4 files decompressed : 262148 bytes total + +cmp_size -lt file-19.zst file-1.zst +cmp_size -lt file-1.zst file-f1.zst +cmp_size -lt file-f1.zst file-f10.zst + +# Test default levels +zstd --fast file -f -q +cmp file.zst file-f1.zst || die "--fast is not level -1" + +zstd -0 file -o file-0.zst -q +zstd -f file -q +cmp file.zst file-0.zst || die "Level 0 is not the default level" + +# Test level clamping +zstd -99 file -o file-99.zst -q +cmp file-19.zst file-99.zst || die "Level 99 is clamped to 19" +zstd --fast=200000 file -c | zstd -t +/*stdin*\ : 65537 bytes + +zstd -5000000000 -f file && die "Level too large, must fail" ||: +error: numeric value overflows 32-bit unsigned int +zstd --fast=5000000000 -f file && die "Level too large, must fail" ||: +error: numeric value overflows 32-bit unsigned int + +# Test setting a level through the environment variable +ZSTD_CLEVEL=-10 zstd file -o file-f10-env.zst -q +ZSTD_CLEVEL=1 zstd file -o file-1-env.zst -q +ZSTD_CLEVEL=+19 zstd file -o file-19-env.zst -q +ZSTD_CLEVEL=+99 zstd file -o file-99-env.zst -q + +cmp file-f10.zst file-f10-env.zst || die "Environment variable failed to set level" +cmp file-1.zst file-1-env.zst || die "Environment variable failed to set level" +cmp file-19.zst file-19-env.zst || die "Environment variable failed to set level" +cmp file-99.zst file-99-env.zst || die "Environment 
variable failed to set level" + +# Test invalid environment clevel is the default level +zstd -f file -q +ZSTD_CLEVEL=- zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=-: not a valid integer value +ZSTD_CLEVEL=+ zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=+: not a valid integer value +ZSTD_CLEVEL=a zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=a: not a valid integer value +ZSTD_CLEVEL=-a zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=-a: not a valid integer value +ZSTD_CLEVEL=+a zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=+a: not a valid integer value +ZSTD_CLEVEL=3a7 zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=3a7: not a valid integer value +ZSTD_CLEVEL=5000000000 zstd -f file -o file-env.zst -q ; cmp file.zst file-env.zst +Ignore environment variable setting ZSTD_CLEVEL=5000000000: numeric value too large + +# Test environment clevel is overridden by command line +ZSTD_CLEVEL=10 zstd -f file -1 -o file-1-env.zst -q +ZSTD_CLEVEL=10 zstd -f file --fast=1 -o file-f1-env.zst -q + +cmp file-1.zst file-1-env.zst || die "Environment variable not overridden" +cmp file-f1.zst file-f1-env.zst || die "Environment variable not overridden" diff --git a/tests/cli-tests/compression/long-distance-matcher.sh b/tests/cli-tests/compression/long-distance-matcher.sh new file mode 100755 index 00000000000..8f2c61bf75c --- /dev/null +++ b/tests/cli-tests/compression/long-distance-matcher.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +# Test --long +zstd -f file --long ; zstd -t file.zst +zstd -f file --long=20; zstd -t file.zst diff --git a/tests/cli-tests/compression/multi-threaded.sh b/tests/cli-tests/compression/multi-threaded.sh new file mode 100755 index 00000000000..17a5eb5186b --- /dev/null +++ b/tests/cli-tests/compression/multi-threaded.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +# Test multi-threaded flags +zstd --single-thread file -f -q ; zstd -t file.zst +zstd -T2 -f file -q ; zstd -t file.zst +zstd --rsyncable -f file -q ; zstd -t file.zst +zstd -T0 -f file -q ; zstd -t file.zst +zstd -T0 --auto-threads=logical -f file -q ; zstd -t file.zst +zstd -T0 --auto-threads=physical -f file -q ; zstd -t file.zst + +# multi-thread decompression warning test +zstd -T0 -f file -q ; zstd -t file.zst; zstd -T0 -d file.zst -o file3 +zstd -T0 -f file -q ; zstd -t file.zst; zstd -T2 -d file.zst -o file4 diff --git a/tests/cli-tests/compression/multi-threaded.sh.stderr.exact b/tests/cli-tests/compression/multi-threaded.sh.stderr.exact new file mode 100644 index 00000000000..11daff6ba63 --- /dev/null +++ b/tests/cli-tests/compression/multi-threaded.sh.stderr.exact @@ -0,0 +1,11 @@ +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +file.zst : 65537 bytes +Warning : decompression does not support multi-threading +file.zst : 65537 bytes diff --git a/tests/cli-tests/compression/multiple-files.sh b/tests/cli-tests/compression/multiple-files.sh new file mode 100755 index 00000000000..aeb74cf25eb --- /dev/null +++ b/tests/cli-tests/compression/multiple-files.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -e + +# setup +echo 
"file1" > file1 +echo "file2" > file2 + +echo "Test zstd ./file1 - file2" +rm -f ./file*.zst +echo "stdin" | zstd ./file1 - ./file2 | zstd -d +cat file1.zst | zstd -d +cat file2.zst | zstd -d + +echo "Test zstd -d ./file1.zst - file2.zst" +rm ./file1 ./file2 +echo "stdin" | zstd - | zstd -d ./file1.zst - file2.zst +cat file1 +cat file2 + +echo "zstd -d ./file1.zst - file2.zst -c" +echo "stdin" | zstd | zstd -d ./file1.zst - file2.zst -c diff --git a/tests/cli-tests/compression/multiple-files.sh.stdout.exact b/tests/cli-tests/compression/multiple-files.sh.stdout.exact new file mode 100644 index 00000000000..aad61d6359a --- /dev/null +++ b/tests/cli-tests/compression/multiple-files.sh.stdout.exact @@ -0,0 +1,12 @@ +Test zstd ./file1 - file2 +stdin +file1 +file2 +Test zstd -d ./file1.zst - file2.zst +stdin +file1 +file2 +zstd -d ./file1.zst - file2.zst -c +file1 +stdin +file2 diff --git a/tests/cli-tests/compression/row-match-finder.sh b/tests/cli-tests/compression/row-match-finder.sh new file mode 100755 index 00000000000..5b36017a0ca --- /dev/null +++ b/tests/cli-tests/compression/row-match-finder.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +# Test --[no-]row-match-finder +zstd file -7f --row-match-finder +zstd file -7f --no-row-match-finder diff --git a/tests/cli-tests/compression/setup b/tests/cli-tests/compression/setup new file mode 100755 index 00000000000..96e2309b6a1 --- /dev/null +++ b/tests/cli-tests/compression/setup @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +datagen > file +datagen > file0 +datagen > file1 diff --git a/tests/cli-tests/compression/stream-size.sh b/tests/cli-tests/compression/stream-size.sh new file mode 100755 index 00000000000..7344769a253 --- /dev/null +++ b/tests/cli-tests/compression/stream-size.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +# Test stream size & hint +datagen -g7654 | zstd --stream-size=7654 | zstd -t +datagen -g7654 | zstd --size-hint=7000 | zstd -t diff --git a/tests/cli-tests/compression/verbose-wlog.sh b/tests/cli-tests/compression/verbose-wlog.sh new file mode 100755 index 00000000000..88ee11ac84d --- /dev/null +++ b/tests/cli-tests/compression/verbose-wlog.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +set -e + +. "$COMMON/platform.sh" + +zstd < file -vv -19 -o file.19.zst +zstd -vv -l file.19.zst + +zstd < file -vv -19 --long -o file.19.long.zst +zstd -vv -l file.19.long.zst diff --git a/tests/cli-tests/compression/verbose-wlog.sh.stderr.glob b/tests/cli-tests/compression/verbose-wlog.sh.stderr.glob new file mode 100644 index 00000000000..13534718980 --- /dev/null +++ b/tests/cli-tests/compression/verbose-wlog.sh.stderr.glob @@ -0,0 +1,5 @@ +... +*wlog=23* +... +*wlog=27* +... diff --git a/tests/cli-tests/compression/verbose-wlog.sh.stdout.glob b/tests/cli-tests/compression/verbose-wlog.sh.stdout.glob new file mode 100644 index 00000000000..19913a1679c --- /dev/null +++ b/tests/cli-tests/compression/verbose-wlog.sh.stdout.glob @@ -0,0 +1,5 @@ +... +*Window Size: 8388608 B* +... +*Window Size: 134217728 B* +... 
diff --git a/tests/cli-tests/compression/window-resize.sh b/tests/cli-tests/compression/window-resize.sh new file mode 100755 index 00000000000..3b5e6fe24f3 --- /dev/null +++ b/tests/cli-tests/compression/window-resize.sh @@ -0,0 +1,9 @@ +#!/bin/sh +datagen -g1G > file +zstd --long=31 -1 --single-thread --no-content-size -f file +zstd -l -v file.zst + +# We want to ignore stderr (its outputting "*** zstd command line interface +# 64-bits v1.5.3, by Yann Collet ***") + +rm file file.zst diff --git a/tests/cli-tests/compression/window-resize.sh.stderr.ignore b/tests/cli-tests/compression/window-resize.sh.stderr.ignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/cli-tests/compression/window-resize.sh.stdout.glob b/tests/cli-tests/compression/window-resize.sh.stdout.glob new file mode 100644 index 00000000000..313d216e1e7 --- /dev/null +++ b/tests/cli-tests/compression/window-resize.sh.stdout.glob @@ -0,0 +1,3 @@ +... +Window Size: 1.000 GiB (1073741824 B) +... diff --git a/tests/cli-tests/decompression/golden.sh b/tests/cli-tests/decompression/golden.sh new file mode 100755 index 00000000000..36919e69285 --- /dev/null +++ b/tests/cli-tests/decompression/golden.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +GOLDEN_DIR="$ZSTD_REPO_DIR/tests/golden-decompression/" + +zstd -r -t "$GOLDEN_DIR" diff --git a/tests/cli-tests/decompression/pass-through.sh b/tests/cli-tests/decompression/pass-through.sh new file mode 100755 index 00000000000..2cab463f840 --- /dev/null +++ b/tests/cli-tests/decompression/pass-through.sh @@ -0,0 +1,57 @@ +#!/bin/sh + +set -e + +. "$COMMON/platform.sh" + +echo "" > 1 +echo "2" > 2 +echo "23" > 3 +echo "234" > 4 +echo "some data" > file + +println "+ passthrough enabled" + +zstd file + +# Test short files +zstd -dc --pass-through 1 2 3 4 + +# Test *cat symlinks +zstdcat file +"$ZSTD_SYMLINK_DIR/zcat" file +"$ZSTD_SYMLINK_DIR/gzcat" file + +# Test multiple files with mix of compressed & not +zstdcat file file.zst +zstdcat file.zst file + +# Test --pass-through +zstd -dc --pass-through file +zstd -d --pass-through file -o pass-through-file + +# Test legacy implicit passthrough with -fc +zstd -dcf file +zstd -dcf file file.zst +zstd -df < file +zstd -dcf < file file.zst - +zstd -dcf < file.zst file - + +$DIFF file pass-through-file + +println "+ passthrough disabled" + +# Test *cat +zstdcat --no-pass-through file && die "should fail" +"$ZSTD_SYMLINK_DIR/zcat" --no-pass-through file && die "should fail" +"$ZSTD_SYMLINK_DIR/gzcat" --no-pass-through file && die "should fail" +# Test zstd without implicit passthrough +zstd -d file -o no-pass-through-file && die "should fail" +zstd -d < file && die "should fail" + +# Test legacy implicit passthrough with -fc +zstd --no-pass-through -dcf file && die "should fail" +zstd --no-pass-through -dcf file file.zst && die "should fail" +zstd --no-pass-through -df < file && die "should fail" +zstd --no-pass-through -dcf < file file.zst - && die "should fail" +zstd --no-pass-through -dcf < file.zst file - && die "should fail" ||: diff --git a/tests/cli-tests/decompression/pass-through.sh.stderr.exact b/tests/cli-tests/decompression/pass-through.sh.stderr.exact new file mode 100644 index 00000000000..62f96ae420e --- /dev/null +++ b/tests/cli-tests/decompression/pass-through.sh.stderr.exact @@ -0,0 +1,11 @@ +file :230.00% ( 10 B => 23 B, file.zst) +zstd: file: unsupported format +zstd: file: unsupported format +zstd: file: unsupported format +zstd: file: unsupported format +zstd: /*stdin*\: unsupported format +zstd: file: 
unsupported format +zstd: file: unsupported format +zstd: /*stdin*\: unsupported format +zstd: /*stdin*\: unsupported format +zstd: file: unsupported format diff --git a/tests/cli-tests/decompression/pass-through.sh.stdout.exact b/tests/cli-tests/decompression/pass-through.sh.stdout.exact new file mode 100644 index 00000000000..b0d494c14aa --- /dev/null +++ b/tests/cli-tests/decompression/pass-through.sh.stdout.exact @@ -0,0 +1,25 @@ ++ passthrough enabled + +2 +23 +234 +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data +some data ++ passthrough disabled +some data +some data +some data diff --git a/tests/cli-tests/dict-builder/empty-input.sh b/tests/cli-tests/dict-builder/empty-input.sh new file mode 100755 index 00000000000..b500bfebda0 --- /dev/null +++ b/tests/cli-tests/dict-builder/empty-input.sh @@ -0,0 +1,9 @@ +#!/bin/sh +set -e +for i in $(seq 50); do + datagen -s$i > file$i +done +touch empty + +set -v +zstd -q --train empty file* diff --git a/tests/cli-tests/dict-builder/empty-input.sh.stderr.exact b/tests/cli-tests/dict-builder/empty-input.sh.stderr.exact new file mode 100644 index 00000000000..2747e766cdf --- /dev/null +++ b/tests/cli-tests/dict-builder/empty-input.sh.stderr.exact @@ -0,0 +1 @@ +zstd -q --train empty file* diff --git a/tests/cli-tests/dict-builder/no-inputs.sh b/tests/cli-tests/dict-builder/no-inputs.sh new file mode 100755 index 00000000000..416b83742f2 --- /dev/null +++ b/tests/cli-tests/dict-builder/no-inputs.sh @@ -0,0 +1,3 @@ +#!/bin/sh +set -v +zstd --train diff --git a/tests/cli-tests/dict-builder/no-inputs.sh.exit b/tests/cli-tests/dict-builder/no-inputs.sh.exit new file mode 100644 index 00000000000..8351c19397f --- /dev/null +++ b/tests/cli-tests/dict-builder/no-inputs.sh.exit @@ -0,0 +1 @@ +14 diff --git a/tests/cli-tests/dict-builder/no-inputs.sh.stderr.exact b/tests/cli-tests/dict-builder/no-inputs.sh.stderr.exact new file mode 100644 index 00000000000..d7b3ea020f7 --- /dev/null +++ b/tests/cli-tests/dict-builder/no-inputs.sh.stderr.exact @@ -0,0 +1,5 @@ +zstd --train +! Warning : nb of samples too low for proper processing ! +! Please provide _one file per sample_. +! Alternatively, split files into fixed-size blocks representative of samples, with -B# +Error 14 : nb of samples too low diff --git a/tests/cli-tests/dictionaries/dictionary-mismatch.sh b/tests/cli-tests/dictionaries/dictionary-mismatch.sh new file mode 100755 index 00000000000..885cac223ad --- /dev/null +++ b/tests/cli-tests/dictionaries/dictionary-mismatch.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +. "$COMMON/platform.sh" + +set -e + +if [ false ]; then + for seed in $(seq 100); do + datagen -g1000 -s$seed > file$seed + done + + zstd --train -r . -o dict0 -qq + + for seed in $(seq 101 200); do + datagen -g1000 -s$seed > file$seed + done + + zstd --train -r . 
-o dict1 -qq + + [ "$($MD5SUM < dict0)" != "$($MD5SUM < dict1)" ] || die "dictionaries must not match" + + datagen -g1000 -s0 > file0 +fi + +set -v +zstd files/0 -D dicts/0 -q +zstd -t files/0.zst -D dicts/0 +zstd -t files/0.zst -D dicts/1 && die "Must fail" ||: +zstd -t files/0.zst && die "Must fail" ||: diff --git a/tests/cli-tests/dictionaries/dictionary-mismatch.sh.stderr.exact b/tests/cli-tests/dictionaries/dictionary-mismatch.sh.stderr.exact new file mode 100644 index 00000000000..8896763c1bc --- /dev/null +++ b/tests/cli-tests/dictionaries/dictionary-mismatch.sh.stderr.exact @@ -0,0 +1,7 @@ +zstd files/0 -D dicts/0 -q +zstd -t files/0.zst -D dicts/0 +files/0.zst : 1000 bytes +zstd -t files/0.zst -D dicts/1 && die "Must fail" ||: +files/0.zst : Decoding error (36) : Dictionary mismatch +zstd -t files/0.zst && die "Must fail" ||: +files/0.zst : Decoding error (36) : Dictionary mismatch diff --git a/tests/cli-tests/dictionaries/golden.sh b/tests/cli-tests/dictionaries/golden.sh new file mode 100755 index 00000000000..85da2ee538c --- /dev/null +++ b/tests/cli-tests/dictionaries/golden.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +set -e + +GOLDEN_COMP_DIR="$ZSTD_REPO_DIR/tests/golden-compression/" +GOLDEN_DICT_DIR="$ZSTD_REPO_DIR/tests/golden-dictionaries/" + +zstd -D "$GOLDEN_DICT_DIR/http-dict-missing-symbols" "$GOLDEN_COMP_DIR/http" -o http.zst +zstd -D "$GOLDEN_DICT_DIR/http-dict-missing-symbols" -t http.zst diff --git a/tests/cli-tests/dictionaries/setup b/tests/cli-tests/dictionaries/setup new file mode 100755 index 00000000000..616c73eb804 --- /dev/null +++ b/tests/cli-tests/dictionaries/setup @@ -0,0 +1,6 @@ +#!/bin/sh + +set -e + +cp -r ../files . +cp -r ../dicts . diff --git a/tests/cli-tests/dictionaries/setup_once b/tests/cli-tests/dictionaries/setup_once new file mode 100755 index 00000000000..1241c578214 --- /dev/null +++ b/tests/cli-tests/dictionaries/setup_once @@ -0,0 +1,24 @@ +#!/bin/sh + +set -e + +. "$COMMON/platform.sh" + + +mkdir files/ dicts/ + +for seed in $(seq 50); do + datagen -g1000 -s$seed > files/$seed +done + +zstd --train -r files -o dicts/0 -qq + +for seed in $(seq 51 100); do + datagen -g1000 -s$seed > files/$seed +done + +zstd --train -r files -o dicts/1 -qq + +cmp dicts/0 dicts/1 && die "dictionaries must not match!" 
+ +datagen -g1000 > files/0 diff --git a/tests/cli-tests/file-stat/compress-file-to-file.sh b/tests/cli-tests/file-stat/compress-file-to-file.sh new file mode 100755 index 00000000000..c5f5900318f --- /dev/null +++ b/tests/cli-tests/file-stat/compress-file-to-file.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +set -e + +datagen > file +chmod 642 file + +zstd file -q --trace-file-stat -o file.zst +zstd -tq file.zst diff --git a/tests/cli-tests/file-stat/compress-file-to-file.sh.stderr.exact b/tests/cli-tests/file-stat/compress-file-to-file.sh.stderr.exact new file mode 100644 index 00000000000..a1ad09ef705 --- /dev/null +++ b/tests/cli-tests/file-stat/compress-file-to-file.sh.stderr.exact @@ -0,0 +1,42 @@ +Trace:FileStat: > UTIL_isLink(file) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_getFileSize(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: < 65537 +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isDirectoryStat() +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isSameFile(file, file.zst) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_getFileSize(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: < 65537 +Trace:FileStat: > UTIL_setFileStat(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_utime(file.zst) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_chmod(file.zst, 0642) +Trace:FileStat: > chmod +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: < 0 diff --git a/tests/cli-tests/file-stat/compress-file-to-stdout.sh b/tests/cli-tests/file-stat/compress-file-to-stdout.sh new file mode 100755 index 00000000000..99ebfc46f59 --- /dev/null +++ b/tests/cli-tests/file-stat/compress-file-to-stdout.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +set -e + +datagen > file + +zstd file -cq --trace-file-stat > file.zst +zstd -tq file.zst diff --git a/tests/cli-tests/file-stat/compress-file-to-stdout.sh.stderr.exact b/tests/cli-tests/file-stat/compress-file-to-stdout.sh.stderr.exact new file mode 100644 index 00000000000..7c690d20b84 --- /dev/null +++ b/tests/cli-tests/file-stat/compress-file-to-stdout.sh.stderr.exact @@ -0,0 +1,24 @@ +Trace:FileStat: > UTIL_isLink(file) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(1) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_getFileSize(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: < 65537 +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isDirectoryStat() +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isRegularFile(/*stdout*\) +Trace:FileStat: > UTIL_stat(/*stdout*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_getFileSize(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: < 65537 diff --git a/tests/cli-tests/file-stat/compress-stdin-to-file.sh b/tests/cli-tests/file-stat/compress-stdin-to-file.sh new file mode 100755 index 00000000000..83794619860 --- 
/dev/null +++ b/tests/cli-tests/file-stat/compress-stdin-to-file.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +set -e + +datagen > file + +zstd < file -q --trace-file-stat -o file.zst +zstd -tq file.zst diff --git a/tests/cli-tests/file-stat/compress-stdin-to-file.sh.stderr.exact b/tests/cli-tests/file-stat/compress-stdin-to-file.sh.stderr.exact new file mode 100644 index 00000000000..00afd97c365 --- /dev/null +++ b/tests/cli-tests/file-stat/compress-stdin-to-file.sh.stderr.exact @@ -0,0 +1,24 @@ +Trace:FileStat: > UTIL_isConsole(0) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_getFileSize(/*stdin*\) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < -1 +Trace:FileStat: > UTIL_isSameFile(/*stdin*\, file.zst) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_getFileSize(/*stdin*\) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < -1 diff --git a/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh b/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh new file mode 100755 index 00000000000..64f4b036216 --- /dev/null +++ b/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +set -e + +datagen > file + +zstd < file -cq --trace-file-stat > file.zst +zstd -tq file.zst diff --git a/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh.stderr.exact b/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh.stderr.exact new file mode 100644 index 00000000000..8bf05e641e1 --- /dev/null +++ b/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh.stderr.exact @@ -0,0 +1,18 @@ +Trace:FileStat: > UTIL_isConsole(0) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(1) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_getFileSize(/*stdin*\) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < -1 +Trace:FileStat: > UTIL_isRegularFile(/*stdout*\) +Trace:FileStat: > UTIL_stat(/*stdout*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_getFileSize(/*stdin*\) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < -1 diff --git a/tests/cli-tests/file-stat/decompress-file-to-file.sh b/tests/cli-tests/file-stat/decompress-file-to-file.sh new file mode 100755 index 00000000000..9e68f8f332a --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-file-to-file.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +set -e + +datagen | zstd -q > file.zst +chmod 642 file.zst + +zstd -dq --trace-file-stat file.zst diff --git a/tests/cli-tests/file-stat/decompress-file-to-file.sh.stderr.exact b/tests/cli-tests/file-stat/decompress-file-to-file.sh.stderr.exact new file mode 100644 index 00000000000..d264c63e418 --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-file-to-file.sh.stderr.exact @@ -0,0 +1,38 @@ +Trace:FileStat: > UTIL_isLink(file.zst) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(1) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isDirectory(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isDirectoryStat() +Trace:FileStat: < 0 +Trace:FileStat: < 0 
+Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isSameFile(file.zst, file) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_setFileStat(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_utime(file) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_chmod(file, 0642) +Trace:FileStat: > chmod +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: < 0 diff --git a/tests/cli-tests/file-stat/decompress-file-to-stdout.sh b/tests/cli-tests/file-stat/decompress-file-to-stdout.sh new file mode 100755 index 00000000000..518c2a950d9 --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-file-to-stdout.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +datagen | zstd -q > file.zst + +zstd -dcq --trace-file-stat file.zst > file diff --git a/tests/cli-tests/file-stat/decompress-file-to-stdout.sh.stderr.exact b/tests/cli-tests/file-stat/decompress-file-to-stdout.sh.stderr.exact new file mode 100644 index 00000000000..7fe6dda1503 --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-file-to-stdout.sh.stderr.exact @@ -0,0 +1,18 @@ +Trace:FileStat: > UTIL_isLink(file.zst) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(1) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isDirectory(file.zst) +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isDirectoryStat() +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_stat(file.zst) +Trace:FileStat: < 1 +Trace:FileStat: > UTIL_isRegularFile(/*stdout*\) +Trace:FileStat: > UTIL_stat(/*stdout*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 diff --git a/tests/cli-tests/file-stat/decompress-stdin-to-file.sh b/tests/cli-tests/file-stat/decompress-stdin-to-file.sh new file mode 100755 index 00000000000..135d75524d0 --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-stdin-to-file.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +datagen | zstd -q > file.zst + +zstd -dcq --trace-file-stat < file.zst -o file diff --git a/tests/cli-tests/file-stat/decompress-stdin-to-file.sh.stderr.exact b/tests/cli-tests/file-stat/decompress-stdin-to-file.sh.stderr.exact new file mode 100644 index 00000000000..749fd395a58 --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-stdin-to-file.sh.stderr.exact @@ -0,0 +1,20 @@ +Trace:FileStat: > UTIL_isConsole(0) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isDirectory(/*stdin*\) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isSameFile(/*stdin*\, file) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(file) +Trace:FileStat: > UTIL_stat(file) +Trace:FileStat: < 1 +Trace:FileStat: < 1 diff --git a/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh b/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh new file mode 100755 index 00000000000..495f07b17fe --- /dev/null +++ 
b/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +datagen | zstd -q > file.zst + +zstd -dcq --trace-file-stat < file.zst > file diff --git a/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh.stderr.exact b/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh.stderr.exact new file mode 100644 index 00000000000..e36cb9d05f8 --- /dev/null +++ b/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh.stderr.exact @@ -0,0 +1,14 @@ +Trace:FileStat: > UTIL_isConsole(0) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(1) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isConsole(2) +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isDirectory(/*stdin*\) +Trace:FileStat: > UTIL_stat(/*stdin*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 +Trace:FileStat: > UTIL_isRegularFile(/*stdout*\) +Trace:FileStat: > UTIL_stat(/*stdout*\) +Trace:FileStat: < 0 +Trace:FileStat: < 0 diff --git a/tests/cli-tests/progress/no-progress.sh b/tests/cli-tests/progress/no-progress.sh new file mode 100755 index 00000000000..708878f2643 --- /dev/null +++ b/tests/cli-tests/progress/no-progress.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +#!/bin/sh + +. "$COMMON/platform.sh" + +set -e + +echo hello > hello +echo world > world + +zstd -q hello world + +println >&2 "Tests cases where progress information should not be printed" + +for args in \ + "" \ + "--fake-stderr-is-console -q" \ + "--fake-stderr-is-console -qq --progress" \ + "--no-progress --fake-stderr-is-console" \ + "--no-progress --fake-stderr-is-console -v" +do + println >&2 "args = $args" + println >&2 "compress file to file" + zstd $args -f hello + println >&2 "compress pipe to pipe" + zstd $args < hello > $INTOVOID + println >&2 "compress pipe to file" + zstd $args < hello -fo hello.zst + println >&2 "compress file to pipe" + zstd $args hello -c > $INTOVOID + println >&2 "compress 2 files" + zstd $args -f hello world + + println >&2 "decompress file to file" + zstd $args -d -f hello.zst + println >&2 "decompress pipe to pipe" + zstd $args -d < hello.zst > $INTOVOID + println >&2 "decompress pipe to file" + zstd $args -d < hello.zst -fo hello + println >&2 "decompress file to pipe" + zstd $args -d hello.zst -c > $INTOVOID + println >&2 "decompress 2 files" + zstd $args -d -f hello.zst world.zst + println >&2 "" +done diff --git a/tests/cli-tests/progress/no-progress.sh.stderr.glob b/tests/cli-tests/progress/no-progress.sh.stderr.glob new file mode 100644 index 00000000000..d0f91122769 --- /dev/null +++ b/tests/cli-tests/progress/no-progress.sh.stderr.glob @@ -0,0 +1,96 @@ +Tests cases where progress information should not be printed +args = +compress file to file +hello*hello.zst* +compress pipe to pipe +compress pipe to file +*stdin*hello.zst* +compress file to pipe +compress 2 files +2 files compressed* +decompress file to file +hello.zst* +decompress pipe to pipe +decompress pipe to file +*stdin* +decompress file to pipe +decompress 2 files +2 files decompressed* + +args = --fake-stderr-is-console -q +compress file to file +compress pipe to pipe +compress pipe to file +compress file to pipe +compress 2 files +decompress file to file +decompress pipe to pipe +decompress pipe to file +decompress file to pipe +decompress 2 files + +args = --fake-stderr-is-console -qq --progress +compress file to file +compress pipe to pipe +compress pipe to file +compress file to pipe +compress 2 files +decompress file to file +decompress pipe to pipe +decompress pipe to file +decompress file to pipe +decompress 2 files + +args = 
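
The file-stat scripts above all follow one pattern: generate input with `datagen`, run `zstd --trace-file-stat` for a given stdin/stdout/file combination, and pin the resulting stderr trace byte-for-byte against a committed `.stderr.exact` file, so any change in the sequence of `UTIL_stat`/`UTIL_chmod`/`UTIL_utime` calls surfaces as a test failure. As a rough sketch of that convention (the real logic lives in `run.py`, introduced further down; the helper below and its paths are illustrative only, assuming the expected stderr sits next to the script):

```python
#!/usr/bin/env python3
# Minimal sketch of the ".stderr.exact" convention: run a test script, capture
# its stderr, and require a byte-for-byte match with the committed expectation.
# Illustrative only; run.py (below) is the real runner.
import subprocess
import sys

def check_exact_stderr(script: str) -> bool:
    with open(script + ".stderr.exact", "rb") as f:
        expected = f.read()
    result = subprocess.run([script], capture_output=True)
    if result.stderr == expected:
        print(f"PASS: {script}")
        return True
    print(f"FAIL: {script}: stderr differs from {script}.stderr.exact")
    return False

if __name__ == "__main__":
    sys.exit(0 if check_exact_stderr(sys.argv[1]) else 1)
```
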
--no-progress --fake-stderr-is-console +compress file to file +hello*hello.zst* +compress pipe to pipe +compress pipe to file +*stdin*hello.zst* +compress file to pipe +compress 2 files +2 files compressed* +decompress file to file +hello.zst* +decompress pipe to pipe +decompress pipe to file +*stdin* +decompress file to pipe +decompress 2 files +2 files decompressed* + +args = --no-progress --fake-stderr-is-console -v +compress file to file +*Zstandard CLI* +hello*hello.zst* +compress pipe to pipe +*Zstandard CLI* +*stdin*stdout* +compress pipe to file +*Zstandard CLI* +*stdin*hello.zst* +compress file to pipe +*Zstandard CLI* +*hello*stdout* +compress 2 files +*Zstandard CLI* +*hello*hello.zst* +*world*world.zst* +2 files compressed* +decompress file to file +*Zstandard CLI* +hello.zst* +decompress pipe to pipe +*Zstandard CLI* +*stdin* +decompress pipe to file +*Zstandard CLI* +*stdin* +decompress file to pipe +*Zstandard CLI* +hello.zst* +decompress 2 files +*Zstandard CLI* +hello.zst* +world.zst* +2 files decompressed* diff --git a/tests/cli-tests/progress/progress.sh b/tests/cli-tests/progress/progress.sh new file mode 100755 index 00000000000..eb464993a24 --- /dev/null +++ b/tests/cli-tests/progress/progress.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +. "$COMMON/platform.sh" + +set -e + +println >&2 "Tests cases where progress information should be printed" + +echo hello > hello +echo world > world + +zstd -q hello world + +for args in \ + "--progress" \ + "--fake-stderr-is-console" \ + "--progress --fake-stderr-is-console -q"; do + println >&2 "args = $args" + println >&2 "compress file to file" + zstd $args -f hello + println >&2 "compress pipe to pipe" + zstd $args < hello > $INTOVOID + println >&2 "compress pipe to file" + zstd $args < hello -fo hello.zst + println >&2 "compress file to pipe" + zstd $args hello -c > $INTOVOID + println >&2 "compress 2 files" + zstd $args -f hello world + + println >&2 "decompress file to file" + zstd $args -d -f hello.zst + println >&2 "decompress pipe to pipe" + zstd $args -d < hello.zst > $INTOVOID + println >&2 "decompress pipe to file" + zstd $args -d < hello.zst -fo hello + println >&2 "decompress file to pipe" + zstd $args -d hello.zst -c > $INTOVOID + println >&2 "decompress 2 files" + zstd $args -d -f hello.zst world.zst + println >&2 "" +done diff --git a/tests/cli-tests/progress/progress.sh.stderr.glob b/tests/cli-tests/progress/progress.sh.stderr.glob new file mode 100644 index 00000000000..ca620d3c282 --- /dev/null +++ b/tests/cli-tests/progress/progress.sh.stderr.glob @@ -0,0 +1,62 @@ +Tests cases where progress information should be printed +args = --progress +compress file to file +*Read:*hello*hello.zst* +compress pipe to pipe +*Read:*stdin*stdout* +compress pipe to file +*Read:*stdin*hello.zst* +compress file to pipe +*Read:*hello*stdout* +compress 2 files +*Read*2 files compressed* +decompress file to file +*hello.zst*hello.zst* +decompress pipe to pipe +*stdin*stdin* +decompress pipe to file +*stdin*stdin* +decompress file to pipe +*hello.zst*hello.zst* +decompress 2 files +*hello.zst*2 files decompressed* + +args = --fake-stderr-is-console +compress file to file +*Read:*hello*hello.zst* +compress pipe to pipe +compress pipe to file +*Read:*stdin*hello.zst* +compress file to pipe +compress 2 files +*Read*2 files compressed* +decompress file to file +*hello.zst*hello.zst* +decompress pipe to pipe +decompress pipe to file +*stdin*stdin* +decompress file to pipe +decompress 2 files +*hello.zst*2 files decompressed* + +args = --progress 
--fake-stderr-is-console -q +compress file to file +*Read:*hello*hello.zst* +compress pipe to pipe +*Read:*stdin*stdout* +compress pipe to file +*Read:*stdin*hello.zst* +compress file to pipe +*Read:*hello*stdout* +compress 2 files +*Read*2 files compressed* +decompress file to file +*hello.zst*hello.zst* +decompress pipe to pipe +*stdin*stdin* +decompress pipe to file +*stdin*stdin* +decompress file to pipe +*hello.zst*hello.zst* +decompress 2 files +*hello.zst*2 files decompressed* diff --git a/tests/cli-tests/run.py b/tests/cli-tests/run.py new file mode 100755 index 00000000000..45af5124b31 --- /dev/null +++ b/tests/cli-tests/run.py @@ -0,0 +1,738 @@ +#!/usr/bin/env python3 +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. +# ########################################################################## + +import argparse +import contextlib +import copy +import fnmatch +import os +import shutil +import subprocess +import sys +import tempfile +import typing + + +ZSTD_SYMLINKS = [ + "zstd", + "zstdmt", + "unzstd", + "zstdcat", + "zcat", + "gzip", + "gunzip", + "gzcat", + "lzma", + "unlzma", + "xz", + "unxz", + "lz4", + "unlz4", +] + + +EXCLUDED_DIRS = { + "bin", + "common", + "scratch", +} + + +EXCLUDED_BASENAMES = { + "setup", + "setup_once", + "teardown", + "teardown_once", + "README.md", + "run.py", + ".gitignore", +} + +EXCLUDED_SUFFIXES = [ + ".exact", + ".glob", + ".ignore", + ".exit", +] + + +def exclude_dir(dirname: str) -> bool: + """ + Should files under the directory :dirname: be excluded from the test runner? + """ + if dirname in EXCLUDED_DIRS: + return True + return False + + +def exclude_file(filename: str) -> bool: + """Should the file :filename: be excluded from the test runner?""" + if filename in EXCLUDED_BASENAMES: + return True + for suffix in EXCLUDED_SUFFIXES: + if filename.endswith(suffix): + return True + return False + +def read_file(filename: str) -> bytes: + """Reads the file :filename: and returns the contents as bytes.""" + with open(filename, "rb") as f: + return f.read() + + +def diff(a: bytes, b: bytes) -> str: + """Returns a diff between two different byte-strings :a: and :b:.""" + assert a != b + with tempfile.NamedTemporaryFile("wb") as fa: + fa.write(a) + fa.flush() + with tempfile.NamedTemporaryFile("wb") as fb: + fb.write(b) + fb.flush() + + diff_bytes = subprocess.run(["diff", fa.name, fb.name], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout + return diff_bytes.decode("utf8") + + +def pop_line(data: bytes) -> typing.Tuple[typing.Optional[bytes], bytes]: + """ + Pop the first line from :data: and returns the first line and the remainder + of the data as a tuple. If :data: is empty, returns :(None, data):. Otherwise + the first line always ends in a :\n:, even if it is the last line and :data: + doesn't end in :\n:. 
+ """ + NEWLINE = b"\n"[0] + + if data == b'': + return (None, data) + + newline_idx = data.find(b"\n") + if newline_idx == -1: + end_idx = len(data) + else: + end_idx = newline_idx + 1 + + line = data[:end_idx] + data = data[end_idx:] + + assert len(line) != 0 + if line[-1] != NEWLINE: + line += NEWLINE + + return (line, data) + + +def glob_line_matches(actual: bytes, expect: bytes) -> bool: + """ + Does the `actual` line match the expected glob line `expect`? + """ + return fnmatch.fnmatchcase(actual.strip(), expect.strip()) + + +def glob_diff(actual: bytes, expect: bytes) -> bytes: + """ + Returns None if the :actual: content matches the expected glob :expect:, + otherwise returns the diff bytes. + """ + diff = b'' + actual_line, actual = pop_line(actual) + expect_line, expect = pop_line(expect) + while True: + # Handle end of file conditions - allow extra newlines + while expect_line is None and actual_line == b"\n": + actual_line, actual = pop_line(actual) + while actual_line is None and expect_line == b"\n": + expect_line, expect = pop_line(expect) + + if expect_line is None and actual_line is None: + if diff == b'': + return None + return diff + elif expect_line is None: + diff += b"---\n" + while actual_line != None: + diff += b"> " + diff += actual_line + actual_line, actual = pop_line(actual) + return diff + elif actual_line is None: + diff += b"---\n" + while expect_line != None: + diff += b"< " + diff += expect_line + expect_line, expect = pop_line(expect) + return diff + + assert expect_line is not None + assert actual_line is not None + + if expect_line == b'...\n': + next_expect_line, expect = pop_line(expect) + if next_expect_line is None: + if diff == b'': + return None + return diff + while not glob_line_matches(actual_line, next_expect_line): + actual_line, actual = pop_line(actual) + if actual_line is None: + diff += b"---\n" + diff += b"< " + diff += next_expect_line + return diff + expect_line = next_expect_line + continue + + if not glob_line_matches(actual_line, expect_line): + diff += b'---\n' + diff += b'< ' + expect_line + diff += b'> ' + actual_line + + actual_line, actual = pop_line(actual) + expect_line, expect = pop_line(expect) + + +class Options: + """Options configuring how to run a :TestCase:.""" + def __init__( + self, + env: typing.Dict[str, str], + timeout: typing.Optional[int], + verbose: bool, + preserve: bool, + scratch_dir: str, + test_dir: str, + set_exact_output: bool, + ) -> None: + self.env = env + self.timeout = timeout + self.verbose = verbose + self.preserve = preserve + self.scratch_dir = scratch_dir + self.test_dir = test_dir + self.set_exact_output = set_exact_output + + +class TestCase: + """ + Logic and state related to running a single test case. + + 1. Initialize the test case. + 2. Launch the test case with :TestCase.launch():. + This will start the test execution in a subprocess, but + not wait for completion. So you could launch multiple test + cases in parallel. This will now print any test output. + 3. Analyze the results with :TestCase.analyze():. This will + join the test subprocess, check the results against the + expectations, and print the results to stdout. + + :TestCase.run(): is also provided which combines the launch & analyze + steps for single-threaded use-cases. + + All other methods, prefixed with _, are private helper functions. + """ + def __init__(self, test_filename: str, options: Options) -> None: + """ + Initialize the :TestCase: for the test located in :test_filename: + with the given :options:. 
+ """ + self._opts = options + self._test_file = test_filename + self._test_name = os.path.normpath( + os.path.relpath(test_filename, start=self._opts.test_dir) + ) + self._success = {} + self._message = {} + self._test_stdin = None + self._scratch_dir = os.path.abspath(os.path.join(self._opts.scratch_dir, self._test_name)) + + @property + def name(self) -> str: + """Returns the unique name for the test.""" + return self._test_name + + def launch(self) -> None: + """ + Launch the test case as a subprocess, but do not block on completion. + This allows users to run multiple tests in parallel. Results aren't yet + printed out. + """ + self._launch_test() + + def analyze(self) -> bool: + """ + Must be called after :TestCase.launch():. Joins the test subprocess and + checks the results against expectations. Finally prints the results to + stdout and returns the success. + """ + self._join_test() + self._check_exit() + self._check_stderr() + self._check_stdout() + self._analyze_results() + return self._succeeded + + def run(self) -> bool: + """Shorthand for combining both :TestCase.launch(): and :TestCase.analyze():.""" + self.launch() + return self.analyze() + + def _log(self, *args, **kwargs) -> None: + """Logs test output.""" + print(file=sys.stdout, *args, **kwargs) + + def _vlog(self, *args, **kwargs) -> None: + """Logs verbose test output.""" + if self._opts.verbose: + print(file=sys.stdout, *args, **kwargs) + + def _test_environment(self) -> typing.Dict[str, str]: + """ + Returns the environment to be used for the + test subprocess. + """ + # We want to omit ZSTD cli flags so tests will be consistent across environments + env = {k: v for k, v in os.environ.items() if not k.startswith("ZSTD")} + for k, v in self._opts.env.items(): + self._vlog(f"${k}='{v}'") + env[k] = v + return env + + def _launch_test(self) -> None: + """Launch the test subprocess, but do not join it.""" + args = [os.path.abspath(self._test_file)] + stdin_name = f"{self._test_file}.stdin" + if os.path.exists(stdin_name): + self._test_stdin = open(stdin_name, "rb") + stdin = self._test_stdin + else: + stdin = subprocess.DEVNULL + cwd = self._scratch_dir + env = self._test_environment() + self._test_process = subprocess.Popen( + args=args, + stdin=stdin, + cwd=cwd, + env=env, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE + ) + + def _join_test(self) -> None: + """Join the test process and save stderr, stdout, and the exit code.""" + (stdout, stderr) = self._test_process.communicate(timeout=self._opts.timeout) + self._output = {} + self._output["stdout"] = stdout + self._output["stderr"] = stderr + self._exit_code = self._test_process.returncode + self._test_process = None + if self._test_stdin is not None: + self._test_stdin.close() + self._test_stdin = None + + def _check_output_exact(self, out_name: str, expected: bytes, exact_name: str) -> None: + """ + Check the output named :out_name: for an exact match against the :expected: content. + Saves the success and message. + """ + check_name = f"check_{out_name}" + actual = self._output[out_name] + if actual == expected: + self._success[check_name] = True + self._message[check_name] = f"{out_name} matches!" 
+ else: + self._success[check_name] = False + self._message[check_name] = f"{out_name} does not match!\n> diff expected actual\n{diff(expected, actual)}" + + if self._opts.set_exact_output: + with open(exact_name, "wb") as f: + f.write(actual) + + def _check_output_glob(self, out_name: str, expected: bytes) -> None: + """ + Check the output named :out_name: for a glob match against the :expected: glob. + Saves the success and message. + """ + check_name = f"check_{out_name}" + actual = self._output[out_name] + diff = glob_diff(actual, expected) + if diff is None: + self._success[check_name] = True + self._message[check_name] = f"{out_name} matches!" + else: + utf8_diff = diff.decode('utf8') + self._success[check_name] = False + self._message[check_name] = f"{out_name} does not match!\n> diff expected actual\n{utf8_diff}" + + def _check_output(self, out_name: str) -> None: + """ + Checks the output named :out_name: for a match against the expectation. + We check for a .exact, .glob, and a .ignore file. If none are found we + expect that the output should be empty. + + If :Options.preserve: was set then we save the scratch directory and + save the stderr, stdout, and exit code to the scratch directory for + debugging. + """ + if self._opts.preserve: + # Save the output to the scratch directory + actual_name = os.path.join(self._scratch_dir, f"{out_name}") + with open(actual_name, "wb") as f: + f.write(self._output[out_name]) + + exact_name = f"{self._test_file}.{out_name}.exact" + glob_name = f"{self._test_file}.{out_name}.glob" + ignore_name = f"{self._test_file}.{out_name}.ignore" + + if os.path.exists(exact_name): + return self._check_output_exact(out_name, read_file(exact_name), exact_name) + elif os.path.exists(glob_name): + return self._check_output_glob(out_name, read_file(glob_name)) + else: + check_name = f"check_{out_name}" + self._success[check_name] = True + self._message[check_name] = f"{out_name} ignored!" + + def _check_stderr(self) -> None: + """Checks the stderr output against the expectation.""" + self._check_output("stderr") + + def _check_stdout(self) -> None: + """Checks the stdout output against the expectation.""" + self._check_output("stdout") + + def _check_exit(self) -> None: + """ + Checks the exit code against expectations. If a .exit file + exists, we expect that the exit code matches the contents. + Otherwise we expect the exit code to be zero. + + If :Options.preserve: is set we save the exit code to the + scratch directory under the filename "exit". + """ + if self._opts.preserve: + exit_name = os.path.join(self._scratch_dir, "exit") + with open(exit_name, "w") as f: + f.write(str(self._exit_code) + "\n") + exit_name = f"{self._test_file}.exit" + if os.path.exists(exit_name): + exit_code: int = int(read_file(exit_name)) + else: + exit_code: int = 0 + if exit_code == self._exit_code: + self._success["check_exit"] = True + self._message["check_exit"] = "Exit code matches!" + else: + self._success["check_exit"] = False + self._message["check_exit"] = f"Exit code mismatch! Expected {exit_code} but got {self._exit_code}" + + def _analyze_results(self) -> None: + """ + After all tests have been checked, collect all the successes + and messages, and print the results to stdout. 
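
To summarize the expectation lookup described in `_check_output()` and `_check_exit()` above: for each of stdout and stderr, the runner first looks for `<test>.<stream>.exact` (byte-exact match), then `<test>.<stream>.glob` (line-wise glob match), and otherwise the stream is not checked; the expected exit code comes from `<test>.exit` if present, defaulting to 0. A compact restatement, using hypothetical helper names:

```python
import os

def expectation_kind(test_file: str, stream: str) -> str:
    # Mirrors the lookup order in TestCase._check_output(): .exact wins over
    # .glob; with neither present, the stream is ignored.
    if os.path.exists(f"{test_file}.{stream}.exact"):
        return "exact"
    if os.path.exists(f"{test_file}.{stream}.glob"):
        return "glob"
    return "ignore"

def expected_exit_code(test_file: str) -> int:
    # Mirrors TestCase._check_exit(): read <test>.exit if it exists, else 0.
    path = f"{test_file}.exit"
    return int(open(path).read()) if os.path.exists(path) else 0
```
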
+ """ + STATUS = {True: "PASS", False: "FAIL"} + checks = sorted(self._success.keys()) + self._succeeded = all(self._success.values()) + self._log(f"{STATUS[self._succeeded]}: {self._test_name}") + + if not self._succeeded or self._opts.verbose: + for check in checks: + if self._opts.verbose or not self._success[check]: + self._log(f"{STATUS[self._success[check]]}: {self._test_name}.{check}") + self._log(self._message[check]) + + self._log("----------------------------------------") + + +class TestSuite: + """ + Setup & teardown test suite & cases. + This class is intended to be used as a context manager. + + TODO: Make setup/teardown failure emit messages, not throw exceptions. + """ + def __init__(self, test_directory: str, options: Options) -> None: + self._opts = options + self._test_dir = os.path.abspath(test_directory) + rel_test_dir = os.path.relpath(test_directory, start=self._opts.test_dir) + assert not rel_test_dir.startswith(os.path.sep) + self._scratch_dir = os.path.normpath(os.path.join(self._opts.scratch_dir, rel_test_dir)) + + def __enter__(self) -> 'TestSuite': + self._setup_once() + return self + + def __exit__(self, _exc_type, _exc_value, _traceback) -> None: + self._teardown_once() + + @contextlib.contextmanager + def test_case(self, test_basename: str) -> TestCase: + """ + Context manager for a test case in the test suite. + Pass the basename of the test relative to the :test_directory:. + """ + assert os.path.dirname(test_basename) == "" + try: + self._setup(test_basename) + test_filename = os.path.join(self._test_dir, test_basename) + yield TestCase(test_filename, self._opts) + finally: + self._teardown(test_basename) + + def _remove_scratch_dir(self, dir: str) -> None: + """Helper to remove a scratch directory with sanity checks""" + assert "scratch" in dir + assert dir.startswith(self._scratch_dir) + assert os.path.exists(dir) + shutil.rmtree(dir) + + def _setup_once(self) -> None: + if os.path.exists(self._scratch_dir): + self._remove_scratch_dir(self._scratch_dir) + os.makedirs(self._scratch_dir) + setup_script = os.path.join(self._test_dir, "setup_once") + if os.path.exists(setup_script): + self._run_script(setup_script, cwd=self._scratch_dir) + + def _teardown_once(self) -> None: + assert os.path.exists(self._scratch_dir) + teardown_script = os.path.join(self._test_dir, "teardown_once") + if os.path.exists(teardown_script): + self._run_script(teardown_script, cwd=self._scratch_dir) + if not self._opts.preserve: + self._remove_scratch_dir(self._scratch_dir) + + def _setup(self, test_basename: str) -> None: + test_scratch_dir = os.path.join(self._scratch_dir, test_basename) + assert not os.path.exists(test_scratch_dir) + os.makedirs(test_scratch_dir) + setup_script = os.path.join(self._test_dir, "setup") + if os.path.exists(setup_script): + self._run_script(setup_script, cwd=test_scratch_dir) + + def _teardown(self, test_basename: str) -> None: + test_scratch_dir = os.path.join(self._scratch_dir, test_basename) + assert os.path.exists(test_scratch_dir) + teardown_script = os.path.join(self._test_dir, "teardown") + if os.path.exists(teardown_script): + self._run_script(teardown_script, cwd=test_scratch_dir) + if not self._opts.preserve: + self._remove_scratch_dir(test_scratch_dir) + + def _run_script(self, script: str, cwd: str) -> None: + env = copy.copy(os.environ) + for k, v in self._opts.env.items(): + env[k] = v + try: + subprocess.run( + args=[script], + stdin=subprocess.DEVNULL, + capture_output=True, + cwd=cwd, + env=env, + check=True, + ) + except 
subprocess.CalledProcessError as e: + print(f"{script} failed with exit code {e.returncode}!") + print(f"stderr:\n{e.stderr}") + print(f"stdout:\n{e.stdout}") + raise + +TestSuites = typing.Dict[str, typing.List[str]] + +def get_all_tests(options: Options) -> TestSuites: + """ + Find all the test in the test directory and return the test suites. + """ + test_suites = {} + for root, dirs, files in os.walk(options.test_dir, topdown=True): + dirs[:] = [d for d in dirs if not exclude_dir(d)] + test_cases = [] + for file in files: + if not exclude_file(file): + test_cases.append(file) + assert root == os.path.normpath(root) + test_suites[root] = test_cases + return test_suites + + +def resolve_listed_tests( + tests: typing.List[str], options: Options +) -> TestSuites: + """ + Resolve the list of tests passed on the command line into their + respective test suites. Tests can either be paths, or test names + relative to the test directory. + """ + test_suites = {} + for test in tests: + if not os.path.exists(test): + test = os.path.join(options.test_dir, test) + if not os.path.exists(test): + raise RuntimeError(f"Test {test} does not exist!") + + test = os.path.normpath(os.path.abspath(test)) + assert test.startswith(options.test_dir) + test_suite = os.path.dirname(test) + test_case = os.path.basename(test) + test_suites.setdefault(test_suite, []).append(test_case) + + return test_suites + +def run_tests(test_suites: TestSuites, options: Options) -> bool: + """ + Runs all the test in the :test_suites: with the given :options:. + Prints the results to stdout. + """ + tests = {} + for test_dir, test_files in test_suites.items(): + with TestSuite(test_dir, options) as test_suite: + test_files = sorted(set(test_files)) + for test_file in test_files: + with test_suite.test_case(test_file) as test_case: + tests[test_case.name] = test_case.run() + + successes = 0 + for test, status in tests.items(): + if status: + successes += 1 + else: + print(f"FAIL: {test}") + if successes == len(tests): + print(f"PASSED all {len(tests)} tests!") + return True + else: + print(f"FAILED {len(tests) - successes} / {len(tests)} tests!") + return False + + +def setup_zstd_symlink_dir(zstd_symlink_dir: str, zstd: str) -> None: + assert os.path.join("bin", "symlinks") in zstd_symlink_dir + if not os.path.exists(zstd_symlink_dir): + os.makedirs(zstd_symlink_dir) + for symlink in ZSTD_SYMLINKS: + path = os.path.join(zstd_symlink_dir, symlink) + if os.path.exists(path): + os.remove(path) + os.symlink(zstd, path) + +if __name__ == "__main__": + CLI_TEST_DIR = os.path.dirname(sys.argv[0]) + REPO_DIR = os.path.join(CLI_TEST_DIR, "..", "..") + PROGRAMS_DIR = os.path.join(REPO_DIR, "programs") + TESTS_DIR = os.path.join(REPO_DIR, "tests") + ZSTD_PATH = os.path.join(PROGRAMS_DIR, "zstd") + ZSTDGREP_PATH = os.path.join(PROGRAMS_DIR, "zstdgrep") + ZSTDLESS_PATH = os.path.join(PROGRAMS_DIR, "zstdless") + DATAGEN_PATH = os.path.join(TESTS_DIR, "datagen") + + parser = argparse.ArgumentParser( + ( + "Runs the zstd CLI tests. Exits nonzero on failure. Default arguments are\n" + "generally correct. Pass --preserve to preserve test output for debugging,\n" + "and --verbose to get verbose test output.\n" + ) + ) + parser.add_argument( + "--preserve", + action="store_true", + help="Preserve the scratch directory TEST_DIR/scratch/ for debugging purposes." + ) + parser.add_argument("--verbose", action="store_true", help="Verbose test output.") + parser.add_argument("--timeout", default=60, type=int, help="Test case timeout in seconds. 
Set to 0 to disable timeouts.") + parser.add_argument( + "--exec-prefix", + default=None, + help="Sets the EXEC_PREFIX environment variable. Prefix to invocations of the zstd CLI." + ) + parser.add_argument( + "--zstd", + default=ZSTD_PATH, + help="Sets the ZSTD_BIN environment variable. Path of the zstd CLI." + ) + parser.add_argument( + "--zstdgrep", + default=ZSTDGREP_PATH, + help="Sets the ZSTDGREP_BIN environment variable. Path of the zstdgrep CLI." + ) + parser.add_argument( + "--zstdless", + default=ZSTDLESS_PATH, + help="Sets the ZSTDLESS_BIN environment variable. Path of the zstdless CLI." + ) + parser.add_argument( + "--datagen", + default=DATAGEN_PATH, + help="Sets the DATAGEN_BIN environment variable. Path to the datagen CLI." + ) + parser.add_argument( + "--test-dir", + default=CLI_TEST_DIR, + help=( + "Runs the tests under this directory. " + "Adds TEST_DIR/bin/ to path. " + "Scratch directory located in TEST_DIR/scratch/." + ) + ) + parser.add_argument( + "--set-exact-output", + action="store_true", + help="Set stderr.exact and stdout.exact for all failing tests, unless .ignore or .glob already exists" + ) + parser.add_argument( + "tests", + nargs="*", + help="Run only these test cases. Can either be paths or test names relative to TEST_DIR/" + ) + args = parser.parse_args() + + if args.timeout <= 0: + args.timeout = None + + args.test_dir = os.path.normpath(os.path.abspath(args.test_dir)) + bin_dir = os.path.abspath(os.path.join(args.test_dir, "bin")) + zstd_symlink_dir = os.path.join(bin_dir, "symlinks") + scratch_dir = os.path.join(args.test_dir, "scratch") + + setup_zstd_symlink_dir(zstd_symlink_dir, os.path.abspath(args.zstd)) + + env = {} + if args.exec_prefix is not None: + env["EXEC_PREFIX"] = args.exec_prefix + env["ZSTD_SYMLINK_DIR"] = zstd_symlink_dir + env["ZSTD_REPO_DIR"] = os.path.abspath(REPO_DIR) + env["DATAGEN_BIN"] = os.path.abspath(args.datagen) + env["ZSTDGREP_BIN"] = os.path.abspath(args.zstdgrep) + env["ZSTDLESS_BIN"] = os.path.abspath(args.zstdless) + env["COMMON"] = os.path.abspath(os.path.join(args.test_dir, "common")) + env["PATH"] = bin_dir + ":" + os.getenv("PATH", "") + env["LC_ALL"] = "C" + + opts = Options( + env=env, + timeout=args.timeout, + verbose=args.verbose, + preserve=args.preserve, + test_dir=args.test_dir, + scratch_dir=scratch_dir, + set_exact_output=args.set_exact_output, + ) + + if len(args.tests) == 0: + tests = get_all_tests(opts) + else: + tests = resolve_listed_tests(args.tests, opts) + + success = run_tests(tests, opts) + if success: + sys.exit(0) + else: + sys.exit(1) diff --git a/tests/cli-tests/zstd-symlinks/setup b/tests/cli-tests/zstd-symlinks/setup new file mode 100755 index 00000000000..cf391ed2117 --- /dev/null +++ b/tests/cli-tests/zstd-symlinks/setup @@ -0,0 +1,6 @@ +#!/bin/sh +set -e + +println "hello" > hello +println "world" > world +zstd hello world diff --git a/tests/cli-tests/zstd-symlinks/zstdcat.sh b/tests/cli-tests/zstd-symlinks/zstdcat.sh new file mode 100755 index 00000000000..74ec063d13c --- /dev/null +++ b/tests/cli-tests/zstd-symlinks/zstdcat.sh @@ -0,0 +1,12 @@ +#!/bin/sh +set -e + +# Test zstdcat symlink in bin/ +zstdcat hello.zst +zstdcat hello.zst world +zstdcat hello world.zst +zstdcat hello.zst world.zst + +# Test local zstdcat symlink +ln -s $(which zstd) ./zstdcat +./zstdcat hello.zst diff --git a/tests/cli-tests/zstd-symlinks/zstdcat.sh.stdout.exact b/tests/cli-tests/zstd-symlinks/zstdcat.sh.stdout.exact new file mode 100644 index 00000000000..3205b059b34 --- /dev/null +++ 
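
`setup_zstd_symlink_dir()` above populates `TEST_DIR/bin/symlinks` with the names in `ZSTD_SYMLINKS` (zstdcat, zcat, gunzip, ...), all pointing at the one zstd binary, and the runner prepends `TEST_DIR/bin` to `PATH`; the zstd-symlinks tests then rely on the CLI choosing its behavior from the name it was invoked under (e.g. `zstdcat` decompresses to stdout and passes plain files through, which is exactly what `zstdcat.sh.stdout.exact` encodes). A toy sketch of that argv[0]-dispatch idea, not zstd's actual implementation:

```python
# Toy illustration of dispatch-by-invocation-name, the mechanism the symlink
# tests exercise. The mode strings are descriptive only.
import os
import sys

def pick_mode(argv0: str) -> str:
    prog = os.path.basename(argv0)
    if prog in ("zstdcat", "zcat", "gzcat"):
        return "decompress to stdout, keep inputs"
    if prog in ("unzstd", "gunzip", "unlzma", "unxz", "unlz4"):
        return "decompress"
    return "compress"

if __name__ == "__main__":
    print(f"{os.path.basename(sys.argv[0])}: {pick_mode(sys.argv[0])}")
```
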
b/tests/cli-tests/zstd-symlinks/zstdcat.sh.stdout.exact @@ -0,0 +1,8 @@ +hello +hello +world +hello +world +hello +world +hello diff --git a/tests/datagencli.c b/tests/datagencli.c index ecc05f95f19..09ec5e9ae36 100644 --- a/tests/datagencli.c +++ b/tests/datagencli.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -124,7 +124,7 @@ int main(int argc, const char** argv) DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed); RDG_genStdout(size, (double)probaU32/100, litProba, seed); - DISPLAYLEVEL(1, "\n"); + DISPLAYLEVEL(3, "\n"); return 0; } diff --git a/tests/decodecorpus.c b/tests/decodecorpus.c index 1037a36596c..148088a7634 100644 --- a/tests/decodecorpus.c +++ b/tests/decodecorpus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,6 +14,7 @@ #include #include #include +#include /* time(), for seed random initialization */ #include "util.h" #include "timefn.h" /* UTIL_clockSpanMicro, SEC_TO_MICRO, UTIL_TIME_INITIALIZER */ @@ -24,21 +25,13 @@ #include "zdict.h" /* Direct access to internal compression functions is required */ -#include "zstd_compress.c" +#include "compress/zstd_compress.c" /* ZSTD_resetSeqStore, ZSTD_storeSeq, *_TO_OFFBASE, HIST_countFast_wksp, HIST_isError */ #define XXH_STATIC_LINKING_ONLY #include "xxhash.h" /* XXH64 */ -#ifndef MIN - #define MIN(a, b) ((a) < (b) ? (a) : (b)) -#endif - -#ifndef MAX_PATH - #ifdef PATH_MAX - #define MAX_PATH PATH_MAX - #else - #define MAX_PATH 256 - #endif +#if !(defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) +# define inline /* disable */ #endif /*-************************************ @@ -70,6 +63,7 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; } \ } while (0) + /*-******************************************************* * Random function *********************************************************/ @@ -136,7 +130,7 @@ static void RAND_genDist(U32* seed, BYTE* dist, double weight) BYTE step = (BYTE) ((RAND(seed) % 256) | 1); /* force it to be odd so it's relatively prime to 256 */ while (i < DISTSIZE) { - size_t states = ((size_t)(weight * statesLeft)) + 1; + size_t states = ((size_t)(weight * (double)statesLeft)) + 1; size_t j; for (j = 0; j < states && i < DISTSIZE; j++, i++) { dist[i] = symb; @@ -165,7 +159,7 @@ static double RAND_exp(U32* seed, double mean) /*-******************************************************* * Constants and Structs *********************************************************/ -const char *BLOCK_TYPES[] = {"raw", "rle", "compressed"}; +const char* BLOCK_TYPES[] = {"raw", "rle", "compressed"}; #define MAX_DECOMPRESSED_SIZE_LOG 20 #define MAX_DECOMPRESSED_SIZE (1ULL << MAX_DECOMPRESSED_SIZE_LOG) @@ -175,6 +169,14 @@ const char *BLOCK_TYPES[] = {"raw", "rle", "compressed"}; #define MIN_SEQ_LEN (3) #define MAX_NB_SEQ ((ZSTD_BLOCKSIZE_MAX + MIN_SEQ_LEN - 1) / MIN_SEQ_LEN) +#ifndef MAX_PATH + #ifdef PATH_MAX + #define MAX_PATH PATH_MAX + #else + #define MAX_PATH 256 + #endif +#endif + BYTE CONTENT_BUFFER[MAX_DECOMPRESSED_SIZE]; BYTE FRAME_BUFFER[MAX_DECOMPRESSED_SIZE * 2]; BYTE LITERAL_BUFFER[ZSTD_BLOCKSIZE_MAX]; @@ -240,6 +242,10 @@ typedef enum { gt_block, /* generate compressed blocks without block/frame 
headers */ } genType_e; +#ifndef MIN + #define MIN(a, b) ((a) < (b) ? (a) : (b)) +#endif + /*-******************************************************* * Global variables (set from command line) *********************************************************/ @@ -454,7 +460,7 @@ static size_t writeHufHeader(U32* seed, HUF_CElt* hufTable, void* dst, size_t ds } /* Write table description header */ - { size_t const hSize = HUF_writeCTable (op, dstSize, hufTable, maxSymbolValue, huffLog); + { size_t const hSize = HUF_writeCTable_wksp (op, dstSize, hufTable, maxSymbolValue, huffLog, WKSP, sizeof(WKSP)); if (hSize + 12 >= srcSize) return 0; /* not useful to try compression */ op += hSize; } @@ -558,10 +564,10 @@ static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t con sizeFormat == 0 ? HUF_compress1X_usingCTable( op, opend - op, LITERAL_BUFFER, litSize, - frame->stats.hufTable) + frame->stats.hufTable, /* flags */ 0) : HUF_compress4X_usingCTable( op, opend - op, LITERAL_BUFFER, litSize, - frame->stats.hufTable); + frame->stats.hufTable, /* flags */ 0); CHECKERR(compressedSize); /* this only occurs when it could not compress or similar */ } while (compressedSize <= 0); @@ -662,16 +668,16 @@ generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore, * ensure nice numbers */ U32 matchLen = MIN_SEQ_LEN + - ROUND(RAND_exp(seed, excessMatch / (double)(numSequences - i))); + ROUND(RAND_exp(seed, (double)excessMatch / (double)(numSequences - i))); U32 literalLen = (RAND(seed) & 7) ? ROUND(RAND_exp(seed, - literalsSize / + (double)literalsSize / (double)(numSequences - i))) : 0; /* actual offset, code to send, and point to copy up to when shifting * codes in the repeat offsets history */ - U32 offset, offsetCode, repIndex; + U32 offset, offBase, repIndex; /* bounds checks */ matchLen = (U32) MIN(matchLen, excessMatch + MIN_SEQ_LEN); @@ -707,12 +713,12 @@ generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore, } } } - offsetCode = STORE_OFFSET(offset); + offBase = OFFSET_TO_OFFBASE(offset); repIndex = 2; } else { /* do a repeat offset */ U32 const randomRepIndex = RAND(seed) % 3; - offsetCode = STORE_REPCODE(randomRepIndex + 1); /* expects values between 1 & 3 */ + offBase = REPCODE_TO_OFFBASE(randomRepIndex + 1); /* expects values between 1 & 3 */ if (literalLen > 0) { offset = frame->stats.rep[randomRepIndex]; repIndex = randomRepIndex; @@ -751,12 +757,12 @@ generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore, DISPLAYLEVEL(7, " srcPos: %8u seqNb: %3u", (unsigned)((BYTE*)srcPtr - (BYTE*)frame->srcStart), (unsigned)i); DISPLAYLEVEL(6, "\n"); - if (STORED_IS_REPCODE(offsetCode)) { /* expects sumtype numeric representation of ZSTD_storeSeq() */ + if (OFFBASE_IS_REPCODE(offBase)) { /* expects sumtype numeric representation of ZSTD_storeSeq() */ DISPLAYLEVEL(7, " repeat offset: %d\n", (int)repIndex); } /* use libzstd sequence handling */ ZSTD_storeSeq(seqStore, literalLen, literals, literals + literalLen, - offsetCode, matchLen); + offBase, matchLen); literalsSize -= literalLen; excessMatch -= (matchLen - MIN_SEQ_LEN); @@ -765,8 +771,8 @@ generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore, memcpy(srcPtr, literals, literalsSize); srcPtr += literalsSize; - DISPLAYLEVEL(6, " excess literals: %5u", (unsigned)literalsSize); - DISPLAYLEVEL(7, " srcPos: %8u", (unsigned)((BYTE*)srcPtr - (BYTE*)frame->srcStart)); + DISPLAYLEVEL(6, " excess literals: %5u ", (unsigned)literalsSize); + DISPLAYLEVEL(7, "srcPos: %8u ", (unsigned)((BYTE*)srcPtr - 
(BYTE*)frame->srcStart)); DISPLAYLEVEL(6, "\n"); return numSequences; diff --git a/tests/external_matchfinder.c b/tests/external_matchfinder.c new file mode 100644 index 00000000000..76ad41126ec --- /dev/null +++ b/tests/external_matchfinder.c @@ -0,0 +1,140 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "external_matchfinder.h" +#include +#include "zstd_compress_internal.h" + +#define HSIZE 1024 +static U32 const HLOG = 10; +static U32 const MLS = 4; +static U32 const BADIDX = 0xffffffff; + +static size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +) { + const BYTE* const istart = (const BYTE*)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + const BYTE* anchor = istart; + size_t seqCount = 0; + U32 hashTable[HSIZE]; + + (void)sequenceProducerState; + (void)dict; + (void)dictSize; + (void)outSeqsCapacity; + (void)compressionLevel; + + { int i; + for (i=0; i < HSIZE; i++) { + hashTable[i] = BADIDX; + } } + + while (ip + MLS < iend) { + size_t const hash = ZSTD_hashPtr(ip, HLOG, MLS); + U32 const matchIndex = hashTable[hash]; + hashTable[hash] = (U32)(ip - istart); + + if (matchIndex != BADIDX) { + const BYTE* const match = istart + matchIndex; + U32 const matchLen = (U32)ZSTD_count(ip, match, iend); + if (matchLen >= ZSTD_MINMATCH_MIN) { + U32 const litLen = (U32)(ip - anchor); + U32 const offset = (U32)(ip - match); + ZSTD_Sequence const seq = { + offset, litLen, matchLen, 0 + }; + + /* Note: it's crucial to stay within the window size! 
*/ + if (offset <= windowSize) { + outSeqs[seqCount++] = seq; + ip += matchLen; + anchor = ip; + continue; + } + } + } + + ip++; + } + + { ZSTD_Sequence const finalSeq = { + 0, (U32)(iend - anchor), 0, 0 + }; + outSeqs[seqCount++] = finalSeq; + } + + return seqCount; +} + +size_t zstreamSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +) { + EMF_testCase const testCase = *((EMF_testCase*)sequenceProducerState); + memset(outSeqs, 0, outSeqsCapacity); + + switch (testCase) { + case EMF_ZERO_SEQS: + return 0; + case EMF_ONE_BIG_SEQ: + outSeqs[0].offset = 0; + outSeqs[0].matchLength = 0; + outSeqs[0].litLength = (U32)(srcSize); + return 1; + case EMF_LOTS_OF_SEQS: + return simpleSequenceProducer( + sequenceProducerState, + outSeqs, outSeqsCapacity, + src, srcSize, + dict, dictSize, + compressionLevel, + windowSize + ); + case EMF_INVALID_OFFSET: + outSeqs[0].offset = 1 << 20; + outSeqs[0].matchLength = 4; + outSeqs[0].litLength = (U32)(srcSize - 4); + return 1; + case EMF_INVALID_MATCHLEN: + outSeqs[0].offset = 1; + outSeqs[0].matchLength = (U32)(srcSize); + outSeqs[0].litLength = 1; + return 1; + case EMF_INVALID_LITLEN: + outSeqs[0].offset = 0; + outSeqs[0].matchLength = 0; + outSeqs[0].litLength = (U32)(srcSize + 1); + return 1; + case EMF_INVALID_LAST_LITS: + outSeqs[0].offset = 1; + outSeqs[0].matchLength = 1; + outSeqs[0].litLength = 1; + outSeqs[1].offset = 0; + outSeqs[1].matchLength = 0; + outSeqs[1].litLength = (U32)(srcSize - 1); + return 2; + case EMF_SMALL_ERROR: + return outSeqsCapacity + 1; + case EMF_BIG_ERROR: + default: + return ZSTD_SEQUENCE_PRODUCER_ERROR; + } +} diff --git a/tests/external_matchfinder.h b/tests/external_matchfinder.h new file mode 100644 index 00000000000..e38dc25caa2 --- /dev/null +++ b/tests/external_matchfinder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef EXTERNAL_MATCHFINDER +#define EXTERNAL_MATCHFINDER + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" + +/* See external_matchfinder.c for details on each test case */ +typedef enum { + EMF_ZERO_SEQS = 0, + EMF_ONE_BIG_SEQ = 1, + EMF_LOTS_OF_SEQS = 2, + EMF_BIG_ERROR = 3, + EMF_SMALL_ERROR = 4, + EMF_INVALID_OFFSET = 5, + EMF_INVALID_MATCHLEN = 6, + EMF_INVALID_LITLEN = 7, + EMF_INVALID_LAST_LITS = 8 +} EMF_testCase; + +size_t zstreamSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +#endif /* EXTERNAL_MATCHFINDER */ diff --git a/tests/fullbench.c b/tests/fullbench.c index b55ff767f2f..3a72d89d374 100644 --- a/tests/fullbench.c +++ b/tests/fullbench.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
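
`simpleSequenceProducer()` above is intentionally minimal: a single-slot hash table over 4-byte prefixes, greedy match extension, sequences emitted only when they are at least `ZSTD_MINMATCH_MIN` long and fit inside the window, and a closing literals-only sequence (`offset == 0`, `matchLength == 0`) covering whatever follows the last match. Below is a Python restatement of the same structure, to make the emitted `(offset, litLength, matchLength)` triples concrete; it keys the table on the raw 4-byte prefix instead of `ZSTD_hashPtr`, so it sidesteps hash collisions, and it illustrates this test matchfinder only, not zstd's real match finders.

```python
# Greedy single-slot matcher in the spirit of simpleSequenceProducer().
from typing import List, Tuple

MINMATCH = 3  # mirrors ZSTD_MINMATCH_MIN

def simple_sequences(src: bytes, window_size: int) -> List[Tuple[int, int, int]]:
    table = {}                    # 4-byte prefix -> most recent position
    seqs: List[Tuple[int, int, int]] = []
    ip = anchor = 0
    while ip + 4 < len(src):
        key = src[ip:ip + 4]
        match = table.get(key)
        table[key] = ip
        if match is not None:
            mlen = 0              # extend the match as far as the input allows
            while ip + mlen < len(src) and src[match + mlen] == src[ip + mlen]:
                mlen += 1
            offset = ip - match
            if mlen >= MINMATCH and offset <= window_size:
                seqs.append((offset, ip - anchor, mlen))
                ip += mlen
                anchor = ip
                continue
        ip += 1
    seqs.append((0, len(src) - anchor, 0))   # final literals-only sequence
    return seqs

print(simple_sequences(b"abcdefgh" * 4, window_size=1 << 17))  # [(8, 8, 24), (0, 0, 0)]
```
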
* * This source code is licensed under both the BSD-style license (found in the @@ -171,6 +171,7 @@ FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src size_t lhSize, litSize, litCSize; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); + int const flags = ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0; switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -195,16 +196,16 @@ FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); #ifndef HUF_FORCE_DECOMPRESS_X2 - return HUF_readDTableX1_wksp_bmi2( + return HUF_readDTableX1_wksp( dctx->entropy.hufTable, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace), - ZSTD_DCtx_get_bmi2(dctx)); + flags); #else return HUF_readDTableX2_wksp( dctx->entropy.hufTable, istart+lhSize, litCSize, - dctx->workspace, sizeof(dctx->workspace)); + dctx->workspace, sizeof(dctx->workspace), flags); #endif } } diff --git a/tests/fuzz/Makefile b/tests/fuzz/Makefile index 912348c3843..bbb262add1b 100644 --- a/tests/fuzz/Makefile +++ b/tests/fuzz/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -34,9 +34,14 @@ ZSTDDIR = ../../lib PRGDIR = ../../programs CONTRIBDIR = ../../contrib +# TODO(embg) make it possible to plug in an arbitrary matchfinder as a .o file +MATCHFINDER_DIR = $(CONTRIBDIR)/externalSequenceProducer +MATCHFINDER_SRC = $(MATCHFINDER_DIR)/sequence_producer.c + FUZZ_CPPFLAGS := -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \ -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(ZSTDDIR)/legacy \ - -I$(CONTRIBDIR)/seekable_format -I$(PRGDIR) -DZSTD_MULTITHREAD -DZSTD_LEGACY_SUPPORT=1 $(CPPFLAGS) + -I$(CONTRIBDIR)/seekable_format -I$(PRGDIR) -I$(MATCHFINDER_DIR) \ + -DZSTD_MULTITHREAD -DZSTD_LEGACY_SUPPORT=1 $(CPPFLAGS) FUZZ_EXTRA_FLAGS := -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ -Wstrict-prototypes -Wundef \ @@ -69,7 +74,8 @@ FUZZ_SRC := \ $(ZSTDCOMMON_SRC) \ $(ZSTDCOMP_SRC) \ $(ZSTDDICT_SRC) \ - $(ZSTDLEGACY_SRC) + $(ZSTDLEGACY_SRC) \ + $(MATCHFINDER_SRC) FUZZ_SRC := $(sort $(wildcard $(FUZZ_SRC))) FUZZ_D_OBJ1 := $(subst $(ZSTDDIR)/common/,d_lib_common_,$(FUZZ_SRC)) @@ -78,9 +84,10 @@ FUZZ_D_OBJ3 := $(subst $(ZSTDDIR)/decompress/,d_lib_decompress_,$(FUZZ_D_OBJ2)) FUZZ_D_OBJ4 := $(subst $(ZSTDDIR)/dictBuilder/,d_lib_dictBuilder_,$(FUZZ_D_OBJ3)) FUZZ_D_OBJ5 := $(subst $(ZSTDDIR)/legacy/,d_lib_legacy_,$(FUZZ_D_OBJ4)) FUZZ_D_OBJ6 := $(subst $(PRGDIR)/,d_prg_,$(FUZZ_D_OBJ5)) -FUZZ_D_OBJ7 := $(subst $\./,d_fuzz_,$(FUZZ_D_OBJ6)) -FUZZ_D_OBJ8 := $(FUZZ_D_OBJ7:.c=.o) -FUZZ_DECOMPRESS_OBJ := $(FUZZ_D_OBJ8:.S=.o) +FUZZ_D_OBJ7 := $(subst $(MATCHFINDER_DIR)/,d_matchfinder_,$(FUZZ_D_OBJ6)) +FUZZ_D_OBJ8 := $(subst $\./,d_fuzz_,$(FUZZ_D_OBJ7)) +FUZZ_D_OBJ9 := $(FUZZ_D_OBJ8:.c=.o) +FUZZ_DECOMPRESS_OBJ := $(FUZZ_D_OBJ9:.S=.o) FUZZ_RT_OBJ1 := $(subst $(ZSTDDIR)/common/,rt_lib_common_,$(FUZZ_SRC)) FUZZ_RT_OBJ2 := $(subst $(ZSTDDIR)/compress/,rt_lib_compress_,$(FUZZ_RT_OBJ1)) @@ -88,9 +95,10 @@ FUZZ_RT_OBJ3 := $(subst $(ZSTDDIR)/decompress/,rt_lib_decompress_,$(FUZZ_RT_OBJ2 
FUZZ_RT_OBJ4 := $(subst $(ZSTDDIR)/dictBuilder/,rt_lib_dictBuilder_,$(FUZZ_RT_OBJ3)) FUZZ_RT_OBJ5 := $(subst $(ZSTDDIR)/legacy/,rt_lib_legacy_,$(FUZZ_RT_OBJ4)) FUZZ_RT_OBJ6 := $(subst $(PRGDIR)/,rt_prg_,$(FUZZ_RT_OBJ5)) -FUZZ_RT_OBJ7 := $(subst $\./,rt_fuzz_,$(FUZZ_RT_OBJ6)) -FUZZ_RT_OBJ8 := $(FUZZ_RT_OBJ7:.c=.o) -FUZZ_ROUND_TRIP_OBJ := $(FUZZ_RT_OBJ8:.S=.o) +FUZZ_RT_OBJ7 := $(subst $(MATCHFINDER_DIR)/,rt_matchfinder_,$(FUZZ_RT_OBJ6)) +FUZZ_RT_OBJ8 := $(subst $\./,rt_fuzz_,$(FUZZ_RT_OBJ7)) +FUZZ_RT_OBJ9 := $(FUZZ_RT_OBJ8:.c=.o) +FUZZ_ROUND_TRIP_OBJ := $(FUZZ_RT_OBJ9:.S=.o) .PHONY: default all clean cleanall @@ -143,6 +151,9 @@ rt_prg_%.o: $(PRGDIR)/%.c rt_fuzz_%.o: %.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $(FUZZ_ROUND_TRIP_FLAGS) $< -c -o $@ +rt_matchfinder_%.o: $(MATCHFINDER_DIR)/%.c + $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $(FUZZ_ROUND_TRIP_FLAGS) $< -c -o $@ + d_lib_common_%.o: $(ZSTDDIR)/common/%.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $< -c -o $@ @@ -167,6 +178,9 @@ d_prg_%.o: $(PRGDIR)/%.c d_fuzz_%.o: %.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $< -c -o $@ +d_matchfinder_%.o: $(MATCHFINDER_DIR)/%.c + $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $< -c -o $@ + simple_round_trip: $(FUZZ_HEADERS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_simple_round_trip.o $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_simple_round_trip.o $(LIB_FUZZING_ENGINE) -o $@ diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md index 71afa40631a..4ff7fe3ac1d 100644 --- a/tests/fuzz/README.md +++ b/tests/fuzz/README.md @@ -50,6 +50,20 @@ See the help of the relevant command for options. Flags not parsed by `fuzz.py` are passed to the fuzzing engine. The command used to run the fuzzer is printed for debugging. +Here's a helpful command to fuzz each target across all cores, +stopping only if a bug is found: +``` +for target in $(./fuzz.py list); do + ./fuzz.py libfuzzer $target -jobs=10 -workers=10 -max_total_time=1000 || break; +done +``` +Alternatively, you can fuzz all targets in parallel, using one core per target: +``` +python3 ./fuzz.py list | xargs -P$(python3 ./fuzz.py list | wc -l) -I__ sh -c "python3 ./fuzz.py libfuzzer __ 2>&1 | tee __.log" +``` +Either way, to double-check that no crashes were found, run `ls corpora/*crash`. +If any crashes were found, you can use the hashes to reproduce them. + ## LibFuzzer ``` diff --git a/tests/fuzz/block_decompress.c b/tests/fuzz/block_decompress.c index bdbf7691615..e4767b3f509 100644 --- a/tests/fuzz/block_decompress.c +++ b/tests/fuzz/block_decompress.c @@ -1,5 +1,5 @@ /** - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/block_round_trip.c b/tests/fuzz/block_round_trip.c index 46a84c7b98b..54012a1dada 100644 --- a/tests/fuzz/block_round_trip.c +++ b/tests/fuzz/block_round_trip.c @@ -1,5 +1,5 @@ /** - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/decompress_dstSize_tooSmall.c b/tests/fuzz/decompress_dstSize_tooSmall.c index 3f7607b2952..8ad3ea1c952 100644 --- a/tests/fuzz/decompress_dstSize_tooSmall.c +++ b/tests/fuzz/decompress_dstSize_tooSmall.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/dictionary_decompress.c b/tests/fuzz/dictionary_decompress.c index 33c58c8bdce..8dea3e56094 100644 --- a/tests/fuzz/dictionary_decompress.c +++ b/tests/fuzz/dictionary_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/dictionary_loader.c b/tests/fuzz/dictionary_loader.c index 5b60bc484b0..1ac22740f26 100644 --- a/tests/fuzz/dictionary_loader.c +++ b/tests/fuzz/dictionary_loader.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/dictionary_round_trip.c b/tests/fuzz/dictionary_round_trip.c index 0b20e8d6797..6dd78c3f314 100644 --- a/tests/fuzz/dictionary_round_trip.c +++ b/tests/fuzz/dictionary_round_trip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/dictionary_stream_round_trip.c b/tests/fuzz/dictionary_stream_round_trip.c index 9af712f0e4f..3ebbd843bb5 100644 --- a/tests/fuzz/dictionary_stream_round_trip.c +++ b/tests/fuzz/dictionary_stream_round_trip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/fse_read_ncount.c b/tests/fuzz/fse_read_ncount.c index c3238609970..29e19442a70 100644 --- a/tests/fuzz/fse_read_ncount.c +++ b/tests/fuzz/fse_read_ncount.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/fuzz.h b/tests/fuzz/fuzz.h index 810daa2ce86..d1e439fef3e 100644 --- a/tests/fuzz/fuzz.h +++ b/tests/fuzz/fuzz.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -26,8 +26,7 @@ * @param MEM_FORCE_MEMORY_ACCESS: * This flag controls how the zstd library accesses unaligned memory. * It can be undefined, or 0 through 2. If it is undefined, it selects - * the method to use based on the compiler. If testing with UBSAN set - * MEM_FORCE_MEMORY_ACCESS=0 to use the standard compliant method. + * the method to use based on the compiler. * @param FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION * This is the canonical flag to enable deterministic builds for fuzzing. * Changes to zstd for fuzzing are gated behind this define. diff --git a/tests/fuzz/fuzz.py b/tests/fuzz/fuzz.py index 0c56cccb217..03ffeeefd77 100755 --- a/tests/fuzz/fuzz.py +++ b/tests/fuzz/fuzz.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -636,7 +636,7 @@ def regression(args): try: description = """ Runs one or more regression tests. - The fuzzer should have been built with with + The fuzzer should have been built with LIB_FUZZING_ENGINE='libregression.a'. Takes input from CORPORA. """ diff --git a/tests/fuzz/fuzz_data_producer.c b/tests/fuzz/fuzz_data_producer.c index eae8ee4b698..a93e8ba950b 100644 --- a/tests/fuzz/fuzz_data_producer.c +++ b/tests/fuzz/fuzz_data_producer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/fuzz_data_producer.h b/tests/fuzz/fuzz_data_producer.h index 62771a9f8ca..8ca501f9b09 100644 --- a/tests/fuzz/fuzz_data_producer.h +++ b/tests/fuzz/fuzz_data_producer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/fuzz_helpers.c b/tests/fuzz/fuzz_helpers.c index 61c0debe5a6..1b6ad9736cb 100644 --- a/tests/fuzz/fuzz_helpers.c +++ b/tests/fuzz/fuzz_helpers.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/fuzz_helpers.h b/tests/fuzz/fuzz_helpers.h index c180478e953..aaf4c1df45e 100644 --- a/tests/fuzz/fuzz_helpers.h +++ b/tests/fuzz/fuzz_helpers.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/huf_decompress.c b/tests/fuzz/huf_decompress.c index fea09fc934e..fcd4b1a3bd2 100644 --- a/tests/fuzz/huf_decompress.c +++ b/tests/fuzz/huf_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,8 +13,6 @@ * compares the result with the original, and calls abort() on corruption. */ -#define HUF_STATIC_LINKING_ONLY - #include #include #include @@ -30,7 +28,13 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) /* Select random parameters: #streams, X1 or X2 decoding, bmi2 */ int const streams = FUZZ_dataProducer_int32Range(producer, 0, 1); int const symbols = FUZZ_dataProducer_int32Range(producer, 0, 1); - int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()) && FUZZ_dataProducer_int32Range(producer, 0, 1); + int const flags = 0 + | (ZSTD_cpuid_bmi2(ZSTD_cpuid()) && FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_bmi2 : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_optimalDepth : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_preferRepeat : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_suspectUncompressible : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_disableAsm : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? 
HUF_flags_disableFast : 0); /* Select a random cBufSize - it may be too small */ size_t const dBufSize = FUZZ_dataProducer_uint32Range(producer, 0, 8 * size + 500); size_t const maxTableLog = FUZZ_dataProducer_uint32Range(producer, 1, HUF_TABLELOG_MAX); @@ -42,18 +46,18 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) size = FUZZ_dataProducer_remainingBytes(producer); if (symbols == 0) { - size_t const err = HUF_readDTableX1_wksp_bmi2(dt, src, size, wksp, wkspSize, bmi2); + size_t const err = HUF_readDTableX1_wksp(dt, src, size, wksp, wkspSize, flags); if (ZSTD_isError(err)) goto _out; } else { - size_t const err = HUF_readDTableX2_wksp_bmi2(dt, src, size, wksp, wkspSize, bmi2); + size_t const err = HUF_readDTableX2_wksp(dt, src, size, wksp, wkspSize, flags); if (ZSTD_isError(err)) goto _out; } if (streams == 0) - HUF_decompress1X_usingDTable_bmi2(dBuf, dBufSize, src, size, dt, bmi2); + HUF_decompress1X_usingDTable(dBuf, dBufSize, src, size, dt, flags); else - HUF_decompress4X_usingDTable_bmi2(dBuf, dBufSize, src, size, dt, bmi2); + HUF_decompress4X_usingDTable(dBuf, dBufSize, src, size, dt, flags); _out: free(dt); diff --git a/tests/fuzz/huf_round_trip.c b/tests/fuzz/huf_round_trip.c index 0e26ca9b51c..4d0f8de23f5 100644 --- a/tests/fuzz/huf_round_trip.c +++ b/tests/fuzz/huf_round_trip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,8 +13,6 @@ * compares the result with the original, and calls abort() on corruption. */ -#define HUF_STATIC_LINKING_ONLY - #include #include #include @@ -24,11 +22,12 @@ #include "common/huf.h" #include "fuzz_helpers.h" #include "fuzz_data_producer.h" +#include "common/bits.h" static size_t adjustTableLog(size_t tableLog, size_t maxSymbol) { size_t const alphabetSize = maxSymbol + 1; - size_t minTableLog = BIT_highbit32(alphabetSize) + 1; + size_t minTableLog = ZSTD_highbit32(alphabetSize) + 1; if ((alphabetSize & (alphabetSize - 1)) != 0) { ++minTableLog; } @@ -45,7 +44,13 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) /* Select random parameters: #streams, X1 or X2 decoding, bmi2 */ int const streams = FUZZ_dataProducer_int32Range(producer, 0, 1); int const symbols = FUZZ_dataProducer_int32Range(producer, 0, 1); - int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()) && FUZZ_dataProducer_int32Range(producer, 0, 1); + int const flags = 0 + | (ZSTD_cpuid_bmi2(ZSTD_cpuid()) && FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_bmi2 : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_optimalDepth : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_preferRepeat : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_suspectUncompressible : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? HUF_flags_disableAsm : 0) + | (FUZZ_dataProducer_int32Range(producer, 0, 1) ? 
HUF_flags_disableFast : 0); /* Select a random cBufSize - it may be too small */ size_t const cBufSize = FUZZ_dataProducer_uint32Range(producer, 0, 4 * size); /* Select a random tableLog - we'll adjust it up later */ @@ -82,7 +87,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) HUF_DTable* dt = (HUF_DTable*)FUZZ_malloc(HUF_DTABLE_SIZE(tableLog) * sizeof(HUF_DTable)); dt[0] = tableLog * 0x01000001; - tableLog = HUF_optimalTableLog(tableLog, size, maxSymbol); + tableLog = HUF_optimalTableLog(tableLog, size, maxSymbol, wksp, wkspSize, ct, count, flags); FUZZ_ASSERT(tableLog <= 12); tableLog = HUF_buildCTable_wksp(ct, count, maxSymbol, tableLog, wksp, wkspSize); FUZZ_ZASSERT(tableLog); @@ -93,11 +98,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) } FUZZ_ZASSERT(tableSize); if (symbols == 0) { - FUZZ_ZASSERT(HUF_readDTableX1_wksp_bmi2(dt, cBuf, tableSize, wksp, wkspSize, bmi2)); + FUZZ_ZASSERT(HUF_readDTableX1_wksp(dt, cBuf, tableSize, wksp, wkspSize, flags)); } else { - size_t const ret = HUF_readDTableX2_wksp(dt, cBuf, tableSize, wksp, wkspSize); + size_t const ret = HUF_readDTableX2_wksp(dt, cBuf, tableSize, wksp, wkspSize, flags); if (ERR_getErrorCode(ret) == ZSTD_error_tableLog_tooLarge) { - FUZZ_ZASSERT(HUF_readDTableX1_wksp_bmi2(dt, cBuf, tableSize, wksp, wkspSize, bmi2)); + FUZZ_ZASSERT(HUF_readDTableX1_wksp(dt, cBuf, tableSize, wksp, wkspSize, flags)); } else { FUZZ_ZASSERT(ret); } @@ -106,15 +111,15 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) size_t cSize; size_t rSize; if (streams == 0) { - cSize = HUF_compress1X_usingCTable_bmi2(cBuf, cBufSize, src, size, ct, bmi2); + cSize = HUF_compress1X_usingCTable(cBuf, cBufSize, src, size, ct, flags); FUZZ_ZASSERT(cSize); if (cSize != 0) - rSize = HUF_decompress1X_usingDTable_bmi2(rBuf, size, cBuf, cSize, dt, bmi2); + rSize = HUF_decompress1X_usingDTable(rBuf, size, cBuf, cSize, dt, flags); } else { - cSize = HUF_compress4X_usingCTable_bmi2(cBuf, cBufSize, src, size, ct, bmi2); + cSize = HUF_compress4X_usingCTable(cBuf, cBufSize, src, size, ct, flags); FUZZ_ZASSERT(cSize); if (cSize != 0) - rSize = HUF_decompress4X_usingDTable_bmi2(rBuf, size, cBuf, cSize, dt, bmi2); + rSize = HUF_decompress4X_usingDTable(rBuf, size, cBuf, cSize, dt, flags); } if (cSize != 0) { FUZZ_ZASSERT(rSize); diff --git a/tests/fuzz/raw_dictionary_round_trip.c b/tests/fuzz/raw_dictionary_round_trip.c index 0e65176548a..7ceab2be6d1 100644 --- a/tests/fuzz/raw_dictionary_round_trip.c +++ b/tests/fuzz/raw_dictionary_round_trip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/regression_driver.c b/tests/fuzz/regression_driver.c index e6d2dec64d3..550c65d8600 100644 --- a/tests/fuzz/regression_driver.c +++ b/tests/fuzz/regression_driver.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/seekable_roundtrip.c b/tests/fuzz/seekable_roundtrip.c index dcdcaae116a..6f0aa288fe8 100644 --- a/tests/fuzz/seekable_roundtrip.c +++ b/tests/fuzz/seekable_roundtrip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/sequence_compression_api.c b/tests/fuzz/sequence_compression_api.c index a2959e1aca6..9d3f0a18a8d 100644 --- a/tests/fuzz/sequence_compression_api.c +++ b/tests/fuzz/sequence_compression_api.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -26,16 +26,20 @@ #include "zstd_helpers.h" #include "fuzz_data_producer.h" -static ZSTD_CCtx *cctx = NULL; -static ZSTD_DCtx *dctx = NULL; +static ZSTD_CCtx* cctx = NULL; +static ZSTD_DCtx* dctx = NULL; static void* literalsBuffer = NULL; static void* generatedSrc = NULL; static ZSTD_Sequence* generatedSequences = NULL; +static void* dictBuffer = NULL; +static ZSTD_CDict* cdict = NULL; +static ZSTD_DDict* ddict = NULL; + #define ZSTD_FUZZ_GENERATED_SRC_MAXSIZE (1 << 20) /* Allow up to 1MB generated data */ +#define ZSTD_FUZZ_GENERATED_LITERALS_SIZE (1 << 20) /* Fixed size 1MB literals buffer */ #define ZSTD_FUZZ_MATCHLENGTH_MAXSIZE (1 << 18) /* Allow up to 256KB matches */ -#define ZSTD_FUZZ_GENERATED_DICT_MAXSIZE (1 << 18) /* Allow up to a 256KB dict */ -#define ZSTD_FUZZ_GENERATED_LITERALS_SIZE (1 << 18) /* Fixed size 256KB literals buffer */ +#define ZSTD_FUZZ_GENERATED_DICT_MAXSIZE (1 << ZSTD_WINDOWLOG_MAX_32) /* Allow up to 1 << ZSTD_WINDOWLOG_MAX_32 dictionary */ #define ZSTD_FUZZ_MAX_NBSEQ (1 << 17) /* Maximum of 128K sequences */ /* Deterministic random number generator */ @@ -55,9 +59,9 @@ static uint32_t FUZZ_RDG_rand(uint32_t* src) /* Make a pseudorandom string - this simple function exists to avoid * taking a dependency on datagen.h to have RDG_genBuffer(). 
*/ -static char *generatePseudoRandomString(char *str, size_t size) { +static char* generatePseudoRandomString(char* str, size_t size, FUZZ_dataProducer_t* producer) { const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890!@#$^&*()_"; - uint32_t seed = 0; + uint32_t seed = FUZZ_dataProducer_uint32(producer); if (size) { for (size_t n = 0; n < size; n++) { int key = FUZZ_RDG_rand(&seed) % (int) (sizeof charset - 1); @@ -69,7 +73,10 @@ static char *generatePseudoRandomString(char *str, size_t size) { /* Returns size of source buffer */ static size_t decodeSequences(void* dst, size_t nbSequences, - size_t literalsSize, const void* dict, size_t dictSize) { + size_t literalsSize, + const void* dict, size_t dictSize, + ZSTD_sequenceFormat_e mode) +{ const uint8_t* litPtr = literalsBuffer; const uint8_t* const litBegin = literalsBuffer; const uint8_t* const litEnd = litBegin + literalsSize; @@ -78,38 +85,37 @@ static size_t decodeSequences(void* dst, size_t nbSequences, const uint8_t* const oend = (uint8_t*)dst + ZSTD_FUZZ_GENERATED_SRC_MAXSIZE; size_t generatedSrcBufferSize = 0; size_t bytesWritten = 0; - uint32_t lastLLSize; for (size_t i = 0; i < nbSequences; ++i) { - FUZZ_ASSERT(generatedSequences[i].matchLength != 0); - FUZZ_ASSERT(generatedSequences[i].offset != 0); + /* block boundary */ + if (generatedSequences[i].offset == 0) + FUZZ_ASSERT(generatedSequences[i].matchLength == 0); if (litPtr + generatedSequences[i].litLength > litEnd) { litPtr = litBegin; } - ZSTD_memcpy(op, litPtr, generatedSequences[i].litLength); + memcpy(op, litPtr, generatedSequences[i].litLength); bytesWritten += generatedSequences[i].litLength; op += generatedSequences[i].litLength; litPtr += generatedSequences[i].litLength; - FUZZ_ASSERT(generatedSequences[i].offset != 0); /* Copy over the match */ { size_t matchLength = generatedSequences[i].matchLength; size_t j = 0; size_t k = 0; if (dictSize != 0) { - if (generatedSequences[i].offset > bytesWritten) { - /* Offset goes into the dictionary */ - size_t offsetFromEndOfDict = generatedSequences[i].offset - bytesWritten; - for (; k < offsetFromEndOfDict && k < matchLength; ++k) { - op[k] = dictPtr[dictSize - offsetFromEndOfDict + k]; + if (generatedSequences[i].offset > bytesWritten) { /* Offset goes into the dictionary */ + size_t dictOffset = generatedSequences[i].offset - bytesWritten; + size_t matchInDict = MIN(matchLength, dictOffset); + for (; k < matchInDict; ++k) { + op[k] = dictPtr[dictSize - dictOffset + k]; } - matchLength -= k; - op += k; + matchLength -= matchInDict; + op += matchInDict; } } for (; j < matchLength; ++j) { - op[j] = op[j-(int)generatedSequences[i].offset]; + op[j] = op[j - generatedSequences[i].offset]; } op += j; FUZZ_ASSERT(generatedSequences[i].matchLength == j + k); @@ -118,55 +124,65 @@ static size_t decodeSequences(void* dst, size_t nbSequences, } generatedSrcBufferSize = bytesWritten; FUZZ_ASSERT(litPtr <= litEnd); - lastLLSize = (uint32_t)(litEnd - litPtr); - if (lastLLSize <= oend - op) { - ZSTD_memcpy(op, litPtr, lastLLSize); - generatedSrcBufferSize += lastLLSize; - } + if (mode == ZSTD_sf_noBlockDelimiters) { + const uint32_t lastLLSize = (uint32_t)(litEnd - litPtr); + if (lastLLSize <= oend - op) { + memcpy(op, litPtr, lastLLSize); + generatedSrcBufferSize += lastLLSize; + } } return generatedSrcBufferSize; } /* Returns nb sequences generated - * TODO: Add repcode fuzzing once we support repcode match splits + * Note : random sequences are always valid in ZSTD_sf_noBlockDelimiters mode. 
+ * However, it can fail with ZSTD_sf_explicitBlockDelimiters, + * due to potential lack of space in */ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer, size_t literalsSizeLimit, size_t dictSize, - size_t windowLog) { + size_t windowLog, ZSTD_sequenceFormat_e mode) +{ + const uint32_t repCode = 0; /* not used by sequence ingestion api */ + size_t windowSize = 1ULL << windowLog; + size_t blockSizeMax = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); + uint32_t matchLengthMax = ZSTD_FUZZ_MATCHLENGTH_MAXSIZE; uint32_t bytesGenerated = 0; uint32_t nbSeqGenerated = 0; - uint32_t litLength; - uint32_t matchLength; - uint32_t matchBound; - uint32_t offset; - uint32_t offsetBound; - uint32_t repCode = 0; uint32_t isFirstSequence = 1; - uint32_t windowSize = 1 << windowLog; + uint32_t blockSize = 0; - while (nbSeqGenerated < ZSTD_FUZZ_MAX_NBSEQ + if (mode == ZSTD_sf_explicitBlockDelimiters) { + /* ensure that no sequence can be larger than one block */ + literalsSizeLimit = MIN(literalsSizeLimit, blockSizeMax/2); + matchLengthMax = MIN(matchLengthMax, blockSizeMax/2); + } + + while ( nbSeqGenerated < ZSTD_FUZZ_MAX_NBSEQ - 3 /* extra room for explicit delimiters */ && bytesGenerated < ZSTD_FUZZ_GENERATED_SRC_MAXSIZE && !FUZZ_dataProducer_empty(producer)) { - matchBound = ZSTD_FUZZ_MATCHLENGTH_MAXSIZE; - litLength = isFirstSequence && dictSize == 0 ? FUZZ_dataProducer_uint32Range(producer, 1, literalsSizeLimit) - : FUZZ_dataProducer_uint32Range(producer, 0, literalsSizeLimit); + uint32_t matchLength; + uint32_t matchBound = matchLengthMax; + uint32_t offset; + uint32_t offsetBound; + const uint32_t minLitLength = (isFirstSequence && (dictSize == 0)); + const uint32_t litLength = FUZZ_dataProducer_uint32Range(producer, minLitLength, (uint32_t)literalsSizeLimit); bytesGenerated += litLength; if (bytesGenerated > ZSTD_FUZZ_GENERATED_SRC_MAXSIZE) { break; } - offsetBound = bytesGenerated > windowSize ? windowSize : bytesGenerated + dictSize; + offsetBound = (bytesGenerated > windowSize) ? windowSize : bytesGenerated + (uint32_t)dictSize; offset = FUZZ_dataProducer_uint32Range(producer, 1, offsetBound); if (dictSize > 0 && bytesGenerated <= windowSize) { /* Prevent match length from being such that it would be associated with an offset too large * from the decoder's perspective. If not possible (match would be too small), * then reduce the offset if necessary. */ - size_t bytesToReachWindowSize = windowSize - bytesGenerated; + const size_t bytesToReachWindowSize = windowSize - bytesGenerated; if (bytesToReachWindowSize < ZSTD_MINMATCH_MIN) { - uint32_t newOffsetBound = offsetBound > windowSize ? windowSize : offsetBound; + const uint32_t newOffsetBound = offsetBound > windowSize ? windowSize : offsetBound; offset = FUZZ_dataProducer_uint32Range(producer, 1, newOffsetBound); } else { - matchBound = bytesToReachWindowSize > ZSTD_FUZZ_MATCHLENGTH_MAXSIZE ? 
- ZSTD_FUZZ_MATCHLENGTH_MAXSIZE : bytesToReachWindowSize; + matchBound = MIN(matchLengthMax, (uint32_t)bytesToReachWindowSize); } } matchLength = FUZZ_dataProducer_uint32Range(producer, ZSTD_MINMATCH_MIN, matchBound); @@ -174,56 +190,78 @@ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer, if (bytesGenerated > ZSTD_FUZZ_GENERATED_SRC_MAXSIZE) { break; } - ZSTD_Sequence seq = {offset, litLength, matchLength, repCode}; - generatedSequences[nbSeqGenerated++] = seq; - isFirstSequence = 0; + { ZSTD_Sequence seq = {offset, litLength, matchLength, repCode}; + const uint32_t lastLits = FUZZ_dataProducer_uint32Range(producer, 0, litLength); + #define SPLITPROB 6000 + #define SPLITMARK 5234 + const int split = (FUZZ_dataProducer_uint32Range(producer, 0, SPLITPROB) == SPLITMARK); + if (mode == ZSTD_sf_explicitBlockDelimiters) { + const size_t seqSize = seq.litLength + seq.matchLength; + if (blockSize + seqSize > blockSizeMax) { /* reaching limit : must end block now */ + const ZSTD_Sequence endBlock = {0, 0, 0, 0}; + generatedSequences[nbSeqGenerated++] = endBlock; + blockSize = seqSize; + } + if (split) { + const ZSTD_Sequence endBlock = {0, lastLits, 0, 0}; + generatedSequences[nbSeqGenerated++] = endBlock; + assert(lastLits <= seq.litLength); + seq.litLength -= lastLits; + blockSize = seqSize - lastLits; + } else { + blockSize += seqSize; + } + } + generatedSequences[nbSeqGenerated++] = seq; + isFirstSequence = 0; + } } + if (mode == ZSTD_sf_explicitBlockDelimiters) { + /* always end sequences with a block delimiter */ + const ZSTD_Sequence endBlock = {0, 0, 0, 0}; + assert(nbSeqGenerated < ZSTD_FUZZ_MAX_NBSEQ); + generatedSequences[nbSeqGenerated++] = endBlock; + } return nbSeqGenerated; } -static size_t roundTripTest(void *result, size_t resultCapacity, - void *compressed, size_t compressedCapacity, - size_t srcSize, - const void *dict, size_t dictSize, - size_t generatedSequencesSize, - size_t wLog, unsigned cLevel, unsigned hasDict) +static size_t roundTripTest(void* result, size_t resultCapacity, + void* compressed, size_t compressedCapacity, + const void* src, size_t srcSize, + const ZSTD_Sequence* seqs, size_t seqSize, + unsigned hasDict, + ZSTD_sequenceFormat_e mode) { size_t cSize; size_t dSize; - ZSTD_CDict* cdict = NULL; - ZSTD_DDict* ddict = NULL; - ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, wLog); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, ZSTD_MINMATCH_MIN); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1); - /* TODO: Add block delim mode fuzzing */ - ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters); if (hasDict) { - FUZZ_ZASSERT(ZSTD_CCtx_loadDictionary(cctx, dict, dictSize)); - FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary(dctx, dict, dictSize)); + FUZZ_ZASSERT(ZSTD_CCtx_refCDict(cctx, cdict)); + FUZZ_ZASSERT(ZSTD_DCtx_refDDict(dctx, ddict)); } cSize = ZSTD_compressSequences(cctx, compressed, compressedCapacity, - generatedSequences, generatedSequencesSize, - generatedSrc, srcSize); + seqs, seqSize, + src, srcSize); + if ( (ZSTD_getErrorCode(cSize) == ZSTD_error_dstSize_tooSmall) + && (mode == ZSTD_sf_explicitBlockDelimiters) ) { + /* Valid scenario : in explicit delimiter mode, + * it might be possible for the compressed size to outgrow dstCapacity. 
+ * In which case, it's still a valid fuzzer scenario, + * but no roundtrip shall be possible */ + return 0; + } + /* round-trip */ FUZZ_ZASSERT(cSize); dSize = ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize); FUZZ_ZASSERT(dSize); - - if (cdict) { - ZSTD_freeCDict(cdict); - } - if (ddict) { - ZSTD_freeDDict(ddict); - } + FUZZ_ASSERT_MSG(dSize == srcSize, "Incorrect regenerated size"); + FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, result, srcSize), "Corruption!"); return dSize; } -int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) +int LLVMFuzzerTestOneInput(const uint8_t* src, size_t size) { void* rBuf; size_t rBufSize; @@ -231,27 +269,70 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) size_t cBufSize; size_t generatedSrcSize; size_t nbSequences; - void* dictBuffer; size_t dictSize = 0; unsigned hasDict; unsigned wLog; int cLevel; + ZSTD_sequenceFormat_e mode; + + FUZZ_dataProducer_t* const producer = FUZZ_dataProducer_create(src, size); + FUZZ_ASSERT(producer); + + if (!cctx) { + cctx = ZSTD_createCCtx(); + FUZZ_ASSERT(cctx); + } + if (!dctx) { + dctx = ZSTD_createDCtx(); + FUZZ_ASSERT(dctx); + } - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size); - if (literalsBuffer == NULL) { + /* Generate window log first so we don't generate offsets too large */ + wLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); + cLevel = FUZZ_dataProducer_int32Range(producer, -3, 22); + mode = (ZSTD_sequenceFormat_e)FUZZ_dataProducer_int32Range(producer, 0, 1); + + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, wLog); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, ZSTD_MINMATCH_MIN); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, mode); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach); + + if (!literalsBuffer) { literalsBuffer = FUZZ_malloc(ZSTD_FUZZ_GENERATED_LITERALS_SIZE); - literalsBuffer = generatePseudoRandomString(literalsBuffer, ZSTD_FUZZ_GENERATED_LITERALS_SIZE); + FUZZ_ASSERT(literalsBuffer); + literalsBuffer = generatePseudoRandomString(literalsBuffer, ZSTD_FUZZ_GENERATED_LITERALS_SIZE, producer); } + if (!dictBuffer) { /* Generate global dictionary buffer */ + ZSTD_compressionParameters cParams; + + /* Generate a large dictionary buffer */ + dictBuffer = calloc(ZSTD_FUZZ_GENERATED_DICT_MAXSIZE, 1); + FUZZ_ASSERT(dictBuffer); + + /* Create global cdict and ddict */ + cParams = ZSTD_getCParams(1, ZSTD_FUZZ_GENERATED_SRC_MAXSIZE, ZSTD_FUZZ_GENERATED_DICT_MAXSIZE); + cParams.minMatch = ZSTD_MINMATCH_MIN; + cParams.hashLog = ZSTD_HASHLOG_MIN; + cParams.chainLog = ZSTD_CHAINLOG_MIN; + + cdict = ZSTD_createCDict_advanced(dictBuffer, ZSTD_FUZZ_GENERATED_DICT_MAXSIZE, ZSTD_dlm_byRef, ZSTD_dct_rawContent, cParams, ZSTD_defaultCMem); + ddict = ZSTD_createDDict_advanced(dictBuffer, ZSTD_FUZZ_GENERATED_DICT_MAXSIZE, ZSTD_dlm_byRef, ZSTD_dct_rawContent, ZSTD_defaultCMem); + FUZZ_ASSERT(cdict); + FUZZ_ASSERT(ddict); + } + + FUZZ_ASSERT(cdict); + FUZZ_ASSERT(ddict); + hasDict = FUZZ_dataProducer_uint32Range(producer, 0, 1); if (hasDict) { - dictSize = FUZZ_dataProducer_uint32Range(producer, 1, ZSTD_FUZZ_GENERATED_DICT_MAXSIZE); - dictBuffer = FUZZ_malloc(dictSize); - dictBuffer = generatePseudoRandomString(dictBuffer, dictSize); + dictSize = 
ZSTD_FUZZ_GENERATED_DICT_MAXSIZE; } - /* Generate window log first so we dont generate offsets too large */ - wLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX_32); - cLevel = FUZZ_dataProducer_int32Range(producer, -3, 22); if (!generatedSequences) { generatedSequences = FUZZ_malloc(sizeof(ZSTD_Sequence)*ZSTD_FUZZ_MAX_NBSEQ); @@ -259,39 +340,32 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) if (!generatedSrc) { generatedSrc = FUZZ_malloc(ZSTD_FUZZ_GENERATED_SRC_MAXSIZE); } - nbSequences = generateRandomSequences(producer, ZSTD_FUZZ_GENERATED_LITERALS_SIZE, dictSize, wLog); - generatedSrcSize = decodeSequences(generatedSrc, nbSequences, ZSTD_FUZZ_GENERATED_LITERALS_SIZE, dictBuffer, dictSize); + + nbSequences = generateRandomSequences(producer, ZSTD_FUZZ_GENERATED_LITERALS_SIZE, dictSize, wLog, mode); + generatedSrcSize = decodeSequences(generatedSrc, nbSequences, ZSTD_FUZZ_GENERATED_LITERALS_SIZE, dictBuffer, dictSize, mode); + + /* Note : in explicit block delimiters mode, + * the fuzzer might generate a lot of small blocks. + * In which case, the final compressed size might be > ZSTD_compressBound(). + * This is still a valid scenario fuzzer though, which makes it possible to check under-sized dstCapacity. + * The test just doesn't roundtrip. */ cBufSize = ZSTD_compressBound(generatedSrcSize); cBuf = FUZZ_malloc(cBufSize); rBufSize = generatedSrcSize; rBuf = FUZZ_malloc(rBufSize); - if (!cctx) { - cctx = ZSTD_createCCtx(); - FUZZ_ASSERT(cctx); - } - if (!dctx) { - dctx = ZSTD_createDCtx(); - FUZZ_ASSERT(dctx); - } - - size_t const result = roundTripTest(rBuf, rBufSize, + { const size_t result = roundTripTest(rBuf, rBufSize, cBuf, cBufSize, - generatedSrcSize, - dictBuffer, dictSize, - nbSequences, - wLog, cLevel, hasDict); - FUZZ_ZASSERT(result); - FUZZ_ASSERT_MSG(result == generatedSrcSize, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!FUZZ_memcmp(generatedSrc, rBuf, generatedSrcSize), "Corruption!"); + generatedSrc, generatedSrcSize, + generatedSequences, nbSequences, + hasDict, mode); + FUZZ_ASSERT(result <= generatedSrcSize); /* can be 0 when no round-trip */ + } free(rBuf); free(cBuf); FUZZ_dataProducer_free(producer); - if (hasDict) { - free(dictBuffer); - } #ifndef STATEFUL_FUZZING ZSTD_freeCCtx(cctx); cctx = NULL; ZSTD_freeDCtx(dctx); dctx = NULL; diff --git a/tests/fuzz/simple_compress.c b/tests/fuzz/simple_compress.c index 3716d0d0fe1..c9fea22d3da 100644 --- a/tests/fuzz/simple_compress.c +++ b/tests/fuzz/simple_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -9,7 +9,7 @@ */ /** - * This fuzz target attempts to comprss the fuzzed data with the simple + * This fuzz target attempts to compress the fuzzed data with the simple * compression function with an output buffer that may be too small to * ensure that the compressor never crashes. */ diff --git a/tests/fuzz/simple_decompress.c b/tests/fuzz/simple_decompress.c index dfff11cbc87..ce5f9f0981b 100644 --- a/tests/fuzz/simple_decompress.c +++ b/tests/fuzz/simple_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
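The sequence_compression_api.c changes above begin exercising ZSTD_sf_explicitBlockDelimiters, where the sequence array handed to ZSTD_compressSequences() must be terminated by a block delimiter: an entry with offset == 0 and matchLength == 0 whose litLength carries the block's trailing literals (exactly the {0, lastLits, 0, 0} entries the fuzzer now emits). As a minimal standalone sketch, not part of this patch, the following shows that calling convention; the source bytes and sequence values are illustrative assumptions, chosen so the declared match really reproduces the source, and error checks on setParameter are omitted for brevity.
```c
/* Minimal sketch, not from this patch: ZSTD_compressSequences() with explicit
 * block delimiters. Input bytes and sequence values are illustrative. */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressSequences is experimental API */
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* "abc", then a 6-byte match at offset 3, then 3 trailing literals "XYZ" */
    const char src[] = "abcabcabcXYZ";
    size_t const srcSize = sizeof(src) - 1;

    ZSTD_Sequence const seqs[2] = {
        { 3, 3, 6, 0 },  /* offset=3, litLength=3, matchLength=6, rep unused */
        { 0, 3, 0, 0 }   /* block delimiter: ends the block, 3 trailing literals */
    };

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const dstCapacity = ZSTD_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    size_t cSize;
    if (cctx == NULL || dst == NULL) return 1;

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);

    cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, 2, src, srcSize);
    if (ZSTD_isError(cSize)) {
        fprintf(stderr, "compressSequences failed: %s\n", ZSTD_getErrorName(cSize));
    } else {
        printf("compressed %zu -> %zu bytes\n", srcSize, cSize);
    }

    free(dst);
    ZSTD_freeCCtx(cctx);
    return 0;
}
```
Decompressing the result with plain ZSTD_decompress() regenerates the 12 source bytes; the litLength + matchLength totals of all sequences (including the delimiter) must add up to srcSize, which is why the harness above tracks blockSize while splitting.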
* * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/simple_round_trip.c b/tests/fuzz/simple_round_trip.c index 9da986bc906..c2c69d950b6 100644 --- a/tests/fuzz/simple_round_trip.c +++ b/tests/fuzz/simple_round_trip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -26,6 +26,23 @@ static ZSTD_CCtx *cctx = NULL; static ZSTD_DCtx *dctx = NULL; +static size_t getDecompressionMargin(void const* compressed, size_t cSize, size_t srcSize, int hasSmallBlocks) +{ + size_t margin = ZSTD_decompressionMargin(compressed, cSize); + if (!hasSmallBlocks) { + /* The macro should be correct in this case, but it may be smaller + * because of e.g. block splitting, so take the smaller of the two. + */ + ZSTD_frameHeader zfh; + size_t marginM; + FUZZ_ZASSERT(ZSTD_getFrameHeader(&zfh, compressed, cSize)); + marginM = ZSTD_DECOMPRESSION_MARGIN(srcSize, zfh.blockSizeMax); + if (marginM < margin) + margin = marginM; + } + return margin; +} + static size_t roundTripTest(void *result, size_t resultCapacity, void *compressed, size_t compressedCapacity, const void *src, size_t srcSize, @@ -67,6 +84,25 @@ static size_t roundTripTest(void *result, size_t resultCapacity, } dSize = ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize); FUZZ_ZASSERT(dSize); + FUZZ_ASSERT_MSG(dSize == srcSize, "Incorrect regenerated size"); + FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, result, dSize), "Corruption!"); + + { + size_t margin = getDecompressionMargin(compressed, cSize, srcSize, targetCBlockSize); + size_t const outputSize = srcSize + margin; + char* const output = (char*)FUZZ_malloc(outputSize); + char* const input = output + outputSize - cSize; + FUZZ_ASSERT(outputSize >= cSize); + memcpy(input, compressed, cSize); + + dSize = ZSTD_decompressDCtx(dctx, output, outputSize, input, cSize); + FUZZ_ZASSERT(dSize); + FUZZ_ASSERT_MSG(dSize == srcSize, "Incorrect regenerated size"); + FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, output, srcSize), "Corruption!"); + + free(output); + } + /* When superblock is enabled make sure we don't expand the block more than expected. * NOTE: This test is currently disabled because superblock mode can arbitrarily * expand the block in the worst case. Once superblock mode has been improved we can @@ -120,13 +156,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) FUZZ_ASSERT(dctx); } - { - size_t const result = - roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer); - FUZZ_ZASSERT(result); - FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!"); - } + roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer); free(rBuf); free(cBuf); FUZZ_dataProducer_free(producer); diff --git a/tests/fuzz/stream_decompress.c b/tests/fuzz/stream_decompress.c index e0cdd34d9c5..0254d06ecea 100644 --- a/tests/fuzz/stream_decompress.c +++ b/tests/fuzz/stream_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -99,14 +99,14 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) while (size > 0) { ZSTD_inBuffer in = makeInBuffer(&src, &size, producer); - while (in.pos != in.size) { + do { + size_t const rc = ZSTD_decompressStream(dstream, &out, &in); + if (ZSTD_isError(rc)) goto error; if (out.pos == out.size) { if (stableOutBuffer) goto error; out = makeOutBuffer(producer, buf, bufSize); } - size_t const rc = ZSTD_decompressStream(dstream, &out, &in); - if (ZSTD_isError(rc)) goto error; - } + } while (in.pos != in.size); } error: diff --git a/tests/fuzz/stream_round_trip.c b/tests/fuzz/stream_round_trip.c index 719eac48d7d..fae9ccbf498 100644 --- a/tests/fuzz/stream_round_trip.c +++ b/tests/fuzz/stream_round_trip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -166,6 +166,24 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size) FUZZ_ZASSERT(rSize); FUZZ_ASSERT_MSG(rSize == size, "Incorrect regenerated size"); FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!"); + + /* Test in-place decompression (note the macro doesn't work in this case) */ + { + size_t const margin = ZSTD_decompressionMargin(cBuf, cSize); + size_t const outputSize = size + margin; + char* const output = (char*)FUZZ_malloc(outputSize); + char* const input = output + outputSize - cSize; + size_t dSize; + FUZZ_ASSERT(outputSize >= cSize); + memcpy(input, cBuf, cSize); + + dSize = ZSTD_decompressDCtx(dctx, output, outputSize, input, cSize); + FUZZ_ZASSERT(dSize); + FUZZ_ASSERT_MSG(dSize == size, "Incorrect regenerated size"); + FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, output, size), "Corruption!"); + + free(output); + } } FUZZ_dataProducer_free(producer); diff --git a/tests/fuzz/zstd_frame_info.c b/tests/fuzz/zstd_frame_info.c index 9ce645d15f5..95dbdd49a47 100644 --- a/tests/fuzz/zstd_frame_info.c +++ b/tests/fuzz/zstd_frame_info.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/fuzz/zstd_helpers.c b/tests/fuzz/zstd_helpers.c index f66579754ff..411b6391c19 100644 --- a/tests/fuzz/zstd_helpers.c +++ b/tests/fuzz/zstd_helpers.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
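The in-place decompression checks added to simple_round_trip.c and stream_round_trip.c above all use the same single-buffer layout. Below is a minimal standalone sketch of that layout, not part of this patch; decompress_in_place, cSrc, cSize and rSize are illustrative names, with rSize assumed to be the known regenerated size of the frame, and ZSTD_decompressionMargin() being experimental API behind ZSTD_STATIC_LINKING_ONLY.
```c
/* Minimal sketch, not from this patch: single-buffer (in-place) decompression.
 * The compressed frame is parked at the tail of one buffer of rSize + margin
 * bytes; decompression writes from the head and never overtakes the unread
 * input as long as the margin returned by ZSTD_decompressionMargin() is kept. */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressionMargin is experimental API */
#include <zstd.h>
#include <stdlib.h>
#include <string.h>

static size_t decompress_in_place(const void* cSrc, size_t cSize, size_t rSize)
{
    size_t const margin = ZSTD_decompressionMargin(cSrc, cSize);
    size_t bufSize, dSize;
    char* buf;
    char* input;

    if (ZSTD_isError(margin)) return 0;
    bufSize = rSize + margin;
    if (bufSize < cSize) return 0;      /* the buffer must also hold the input */
    buf = (char*)malloc(bufSize);
    if (buf == NULL) return 0;

    input = buf + bufSize - cSize;      /* compressed data at the very end */
    memcpy(input, cSrc, cSize);

    dSize = ZSTD_decompress(buf, bufSize, input, cSize);   /* output overlaps input */
    if (ZSTD_isError(dSize)) { free(buf); return 0; }

    /* buf[0..dSize) now holds the regenerated data; freed here for brevity */
    free(buf);
    return dSize;
}
```
The ZSTD_DECOMPRESSION_MARGIN() macro used by getDecompressionMargin() above provides a compile-time upper bound for the same quantity when the frame's content size and maximum block size are known ahead of time.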
* * This source code is licensed under both the BSD-style license (found in the @@ -17,6 +17,7 @@ #include "fuzz_helpers.h" #include "zstd.h" #include "zdict.h" +#include "sequence_producer.h" const int kMinClevel = -3; const int kMaxClevel = 19; @@ -26,9 +27,14 @@ static void set(ZSTD_CCtx *cctx, ZSTD_cParameter param, int value) FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, param, value)); } +static unsigned produceParamValue(unsigned min, unsigned max, + FUZZ_dataProducer_t *producer) { + return FUZZ_dataProducer_uint32Range(producer, min, max); +} + static void setRand(ZSTD_CCtx *cctx, ZSTD_cParameter param, unsigned min, unsigned max, FUZZ_dataProducer_t *producer) { - unsigned const value = FUZZ_dataProducer_uint32Range(producer, min, max); + unsigned const value = produceParamValue(min, max, producer); set(cctx, param, value); } @@ -65,6 +71,17 @@ ZSTD_parameters FUZZ_randomParams(size_t srcSize, FUZZ_dataProducer_t *producer) return params; } +static void setSequenceProducerParams(ZSTD_CCtx *cctx, FUZZ_dataProducer_t *producer) { + ZSTD_registerSequenceProducer( + cctx, + NULL, + simpleSequenceProducer + ); + setRand(cctx, ZSTD_c_enableSeqProducerFallback, 0, 1, producer); + FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0)); + FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, ZSTD_ps_disable)); +} + void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer_t *producer) { ZSTD_compressionParameters cParams = FUZZ_randomCParams(srcSize, producer); @@ -80,7 +97,7 @@ void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer setRand(cctx, ZSTD_c_checksumFlag, 0, 1, producer); setRand(cctx, ZSTD_c_dictIDFlag, 0, 1, producer); /* Select long distance matching parameters */ - setRand(cctx, ZSTD_c_enableLongDistanceMatching, 0, 1, producer); + setRand(cctx, ZSTD_c_enableLongDistanceMatching, ZSTD_ps_auto, ZSTD_ps_disable, producer); setRand(cctx, ZSTD_c_ldmHashLog, ZSTD_HASHLOG_MIN, 16, producer); setRand(cctx, ZSTD_c_ldmMinMatch, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX, producer); @@ -89,8 +106,19 @@ void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer setRand(cctx, ZSTD_c_ldmHashRateLog, ZSTD_LDM_HASHRATELOG_MIN, ZSTD_LDM_HASHRATELOG_MAX, producer); /* Set misc parameters */ +#ifndef ZSTD_MULTITHREAD + // To reproduce with or without ZSTD_MULTITHREAD, we are going to use + // the same amount of entropy. 
+ unsigned const nbWorkers_value = produceParamValue(0, 2, producer); + unsigned const rsyncable_value = produceParamValue(0, 1, producer); + (void)nbWorkers_value; + (void)rsyncable_value; + set(cctx, ZSTD_c_nbWorkers, 0); + set(cctx, ZSTD_c_rsyncable, 0); +#else setRand(cctx, ZSTD_c_nbWorkers, 0, 2, producer); setRand(cctx, ZSTD_c_rsyncable, 0, 1, producer); +#endif setRand(cctx, ZSTD_c_useRowMatchFinder, 0, 2, producer); setRand(cctx, ZSTD_c_enableDedicatedDictSearch, 0, 1, producer); setRand(cctx, ZSTD_c_forceMaxWindow, 0, 1, producer); @@ -98,12 +126,22 @@ void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer setRand(cctx, ZSTD_c_forceAttachDict, 0, 2, producer); setRand(cctx, ZSTD_c_useBlockSplitter, 0, 2, producer); setRand(cctx, ZSTD_c_deterministicRefPrefix, 0, 1, producer); + setRand(cctx, ZSTD_c_prefetchCDictTables, 0, 2, producer); + setRand(cctx, ZSTD_c_maxBlockSize, ZSTD_BLOCKSIZE_MAX_MIN, ZSTD_BLOCKSIZE_MAX, producer); + setRand(cctx, ZSTD_c_validateSequences, 0, 1, producer); + setRand(cctx, ZSTD_c_searchForExternalRepcodes, 0, 2, producer); if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) { setRand(cctx, ZSTD_c_srcSizeHint, ZSTD_SRCSIZEHINT_MIN, 2 * srcSize, producer); } if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) { setRand(cctx, ZSTD_c_targetCBlockSize, ZSTD_TARGETCBLOCKSIZE_MIN, ZSTD_TARGETCBLOCKSIZE_MAX, producer); } + + if (FUZZ_dataProducer_uint32Range(producer, 0, 10) == 1) { + setSequenceProducerParams(cctx, producer); + } else { + ZSTD_registerSequenceProducer(cctx, NULL, NULL); + } } FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, FUZZ_dataProducer_t *producer) diff --git a/tests/fuzz/zstd_helpers.h b/tests/fuzz/zstd_helpers.h index 7813884d3f3..a4cfe321bdb 100644 --- a/tests/fuzz/zstd_helpers.h +++ b/tests/fuzz/zstd_helpers.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -17,6 +17,7 @@ #define ZSTD_STATIC_LINKING_ONLY #include "zstd.h" +#include "zstd_errors.h" #include "fuzz_data_producer.h" #include diff --git a/tests/fuzzer.c b/tests/fuzzer.c index d168d657611..fc78c7fbc5f 100644 --- a/tests/fuzzer.c +++ b/tests/fuzzer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
 * * This source code is licensed under both the BSD-style license (found in the @@ -25,7 +25,8 @@ #include <stdlib.h> /* free */ #include <stdio.h> /* fgets, sscanf */ #include <string.h> /* strcmp */ -#undef NDEBUG +#include <time.h> /* time(), time_t */ +#undef NDEBUG /* always enable assert() */ #include <assert.h> #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressContinue, ZSTD_compressBlock */ #include "debug.h" /* DEBUG_STATIC_ASSERT */ @@ -82,8 +83,8 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; void FUZ_bug976(void); void FUZ_bug976(void) { /* these constants shall not depend on MIN() macro */ - assert(ZSTD_HASHLOG_MAX < 31); - assert(ZSTD_CHAINLOG_MAX < 31); + DEBUG_STATIC_ASSERT(ZSTD_HASHLOG_MAX < 31); + DEBUG_STATIC_ASSERT(ZSTD_CHAINLOG_MAX < 31); } @@ -118,23 +119,24 @@ static U32 FUZ_highbit32(U32 v32) /*============================================= * Test macros =============================================*/ -#define CHECK_Z(f) { \ - size_t const err = f; \ - if (ZSTD_isError(err)) { \ - DISPLAY("Error => %s : %s ", \ - #f, ZSTD_getErrorName(err)); \ - exit(1); \ +#define CHECK(fn) { if(!(fn)) { DISPLAYLEVEL(1, "Error : test (%s) failed \n", #fn); exit(1); } } + +#define CHECK_Z(f) { \ + size_t const err = f; \ + if (ZSTD_isError(err)) { \ + DISPLAY("Error => %s : %s ", \ + #f, ZSTD_getErrorName(err)); \ + exit(1); \ } } -#define CHECK_VAR(var, fn) var = fn; if (ZSTD_isError(var)) { DISPLAYLEVEL(1, "%s : fails : %s \n", #fn, ZSTD_getErrorName(var)); goto _output_error; } +#define CHECK_VAR(var, fn) var = fn; if (ZSTD_isError(var)) { DISPLAYLEVEL(1, "%s : fails : %s \n", #fn, ZSTD_getErrorName(var)); exit(1); } #define CHECK_NEWV(var, fn) size_t const CHECK_VAR(var, fn) -#define CHECK(fn) { CHECK_NEWV(__err, fn); } #define CHECKPLUS(var, fn, more) { CHECK_NEWV(var, fn); more; } #define CHECK_OP(op, lhs, rhs) { \ if (!((lhs) op (rhs))) { \ DISPLAY("Error L%u => FAILED %s %s %s ", __LINE__, #lhs, #op, #rhs); \ - goto _output_error; \ + exit(1); \ } \ } #define CHECK_EQ(lhs, rhs) CHECK_OP(==, lhs, rhs) @@ -338,6 +340,7 @@ static void FUZ_decodeSequences(BYTE* dst, ZSTD_Sequence* seqs, size_t seqsSize, } #ifdef ZSTD_MULTITHREAD + typedef struct { ZSTD_CCtx* cctx; ZSTD_threadPool* pool; @@ -429,8 +432,8 @@ static int threadPoolTests(void) { ZSTD_pthread_create(&t1, NULL, threadPoolTests_compressionJob, &p1); ZSTD_pthread_create(&t2, NULL, threadPoolTests_compressionJob, &p2); - ZSTD_pthread_join(t1, NULL); - ZSTD_pthread_join(t2, NULL); + ZSTD_pthread_join(t1); + ZSTD_pthread_join(t2); assert(!memcmp(decodedBuffer, decodedBuffer2, CNBuffSize)); free(decodedBuffer2); @@ -461,6 +464,101 @@ static int threadPoolTests(void) { * Unit tests =============================================*/ +static void test_compressBound(unsigned tnb) +{ + DISPLAYLEVEL(3, "test%3u : compressBound : ", tnb); + + /* check ZSTD_compressBound == ZSTD_COMPRESSBOUND + * for a large range of known valid values */ + DEBUG_STATIC_ASSERT(sizeof(size_t) >= 4); + { int s; + for (s=0; s<30; s++) { + size_t const w = (size_t)1 << s; + CHECK_EQ(ZSTD_compressBound(w), ZSTD_COMPRESSBOUND(w)); + } } + + /* Ensure error if srcSize too big */ + { size_t const w = ZSTD_MAX_INPUT_SIZE + 1; + CHECK(ZSTD_isError(ZSTD_compressBound(w))); /* must fail */ + CHECK_EQ(ZSTD_COMPRESSBOUND(w), 0); + } + + DISPLAYLEVEL(3, "OK \n"); +} + +static void test_decompressBound(unsigned tnb) +{ + DISPLAYLEVEL(3, "test%3u : decompressBound : ", tnb); + + /* Simple compression, with size : should provide size; */ + { const char example[] = "abcd"; char 
cBuffer[ZSTD_COMPRESSBOUND(sizeof(example))]; + size_t const cSize = ZSTD_compress(cBuffer, sizeof(cBuffer), example, sizeof(example), 0); + CHECK_Z(cSize); + CHECK_EQ(ZSTD_decompressBound(cBuffer, cSize), (unsigned long long)sizeof(example)); + } + + /* Simple small compression without size : should provide 1 block size */ + { char cBuffer[ZSTD_COMPRESSBOUND(0)]; + ZSTD_outBuffer out = { cBuffer, sizeof(cBuffer), 0 }; + ZSTD_inBuffer in = { NULL, 0, 0 }; + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); + assert(cctx); + CHECK_Z( ZSTD_initCStream(cctx, 0) ); + CHECK_Z( ZSTD_compressStream(cctx, &out, &in) ); + CHECK_EQ( ZSTD_endStream(cctx, &out), 0 ); + CHECK_EQ( ZSTD_decompressBound(cBuffer, out.pos), ZSTD_BLOCKSIZE_MAX ); + ZSTD_freeCCtx(cctx); + } + + /* Attempt to overflow 32-bit intermediate multiplication result + * This requires dBound >= 4 GB, aka 2^32. + * This requires 2^32 / 2^17 = 2^15 blocks + * => create 2^15 blocks (can be empty, or just 1 byte). */ + { const char input[] = "a"; + size_t const nbBlocks = (1 << 15) + 1; + size_t blockNb; + size_t const outCapacity = 1 << 18; /* large margin */ + char* const outBuffer = malloc (outCapacity); + ZSTD_outBuffer out = { outBuffer, outCapacity, 0 }; + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); + assert(cctx); + assert(outBuffer); + CHECK_Z( ZSTD_initCStream(cctx, 0) ); + for (blockNb=0; blockNb 0x100000000LLU /* 4 GB */ ); + ZSTD_freeCCtx(cctx); + free(outBuffer); + } + + DISPLAYLEVEL(3, "OK \n"); +} + +static void test_setCParams(unsigned tnb) +{ + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); + ZSTD_compressionParameters cparams; + assert(cctx); + + DISPLAYLEVEL(3, "test%3u : ZSTD_CCtx_setCParams : ", tnb); + + /* valid cparams */ + cparams = ZSTD_getCParams(1, 0, 0); + CHECK_Z(ZSTD_CCtx_setCParams(cctx, cparams)); + + /* invalid cparams (must fail) */ + cparams.windowLog = 99; + CHECK(ZSTD_isError(ZSTD_CCtx_setCParams(cctx, cparams))); + + free(cctx); + DISPLAYLEVEL(3, "OK \n"); +} + static int basicUnitTests(U32 const seed, double compressibility) { size_t const CNBuffSize = 5 MB; @@ -507,6 +605,12 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "%u (OK) \n", vn); } + test_compressBound(testNb++); + + test_decompressBound(testNb++); + + test_setCParams(testNb++); + DISPLAYLEVEL(3, "test%3u : ZSTD_adjustCParams : ", testNb++); { ZSTD_compressionParameters params; @@ -580,6 +684,17 @@ static int basicUnitTests(U32 const seed, double compressibility) if (r != CNBuffSize) goto _output_error; } DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : decompress %u bytes with Huffman assembly disabled : ", testNb++, (unsigned)CNBuffSize); + { + ZSTD_DCtx* dctx = ZSTD_createDCtx(); + size_t r; + CHECK_Z(ZSTD_DCtx_setParameter(dctx, ZSTD_d_disableHuffmanAssembly, 1)); + r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize); + if (r != CNBuffSize || memcmp(decodedBuffer, CNBuffer, CNBuffSize)) goto _output_error; + ZSTD_freeDCtx(dctx); + } + DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : check decompressed result : ", testNb++); { size_t u; for (u=0; u 10 * refPrefixLdmComrpessedSize); - /* make sure the ldm comrpessed size is less than 1% of original */ - assert((double)refPrefixLdmComrpessedSize / (double)size < 0.01); + assert(refPrefixCompressedSize > 10 * refPrefixLdmCompressedSize); + /* make sure the ldm compressed size is less than 1% of original */ + assert((double)refPrefixLdmCompressedSize / (double)size < 0.01); ZSTD_freeDCtx(dctx); ZSTD_freeCCtx(cctx); @@ -1117,6 
+1232,60 @@ static int basicUnitTests(U32 const seed, double compressibility) } DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : in-place decompression : ", testNb++); + cSize = ZSTD_compress(compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize, -ZSTD_BLOCKSIZE_MAX); + CHECK_Z(cSize); + CHECK_LT(CNBuffSize, cSize); + { + size_t const margin = ZSTD_decompressionMargin(compressedBuffer, cSize); + size_t const outputSize = (CNBuffSize + margin); + char* output = malloc(outputSize); + char* input = output + outputSize - cSize; + CHECK_LT(cSize, CNBuffSize + margin); + CHECK(output != NULL); + CHECK_Z(margin); + CHECK(margin <= ZSTD_DECOMPRESSION_MARGIN(CNBuffSize, ZSTD_BLOCKSIZE_MAX)); + memcpy(input, compressedBuffer, cSize); + + { + size_t const dSize = ZSTD_decompress(output, outputSize, input, cSize); + CHECK_Z(dSize); + CHECK_EQ(dSize, CNBuffSize); + } + CHECK(!memcmp(output, CNBuffer, CNBuffSize)); + free(output); + } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : in-place decompression with 2 frames : ", testNb++); + cSize = ZSTD_compress(compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize / 3, -ZSTD_BLOCKSIZE_MAX); + CHECK_Z(cSize); + { + size_t const cSize2 = ZSTD_compress((char*)compressedBuffer + cSize, compressedBufferSize - cSize, (char const*)CNBuffer + (CNBuffSize / 3), CNBuffSize / 3, -ZSTD_BLOCKSIZE_MAX); + CHECK_Z(cSize2); + cSize += cSize2; + } + { + size_t const srcSize = (CNBuffSize / 3) * 2; + size_t const margin = ZSTD_decompressionMargin(compressedBuffer, cSize); + size_t const outputSize = (CNBuffSize + margin); + char* output = malloc(outputSize); + char* input = output + outputSize - cSize; + CHECK_LT(cSize, CNBuffSize + margin); + CHECK(output != NULL); + CHECK_Z(margin); + memcpy(input, compressedBuffer, cSize); + + { + size_t const dSize = ZSTD_decompress(output, outputSize, input, cSize); + CHECK_Z(dSize); + CHECK_EQ(dSize, srcSize); + } + CHECK(!memcmp(output, CNBuffer, srcSize)); + free(output); + } + DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3d: superblock uncompressible data, too many nocompress superblocks : ", testNb++); { ZSTD_CCtx* const cctx = ZSTD_createCCtx(); @@ -1206,7 +1375,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : compress a NULL input with each level : ", testNb++); { int level = -1; - ZSTD_CCtx* cctx = ZSTD_createCCtx(); + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); if (!cctx) goto _output_error; for (level = -1; level <= ZSTD_maxCLevel(); ++level) { CHECK_Z( ZSTD_compress(compressedBuffer, compressedBufferSize, NULL, 0, level) ); @@ -1579,7 +1748,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : init CCtx for small level %u (should work again) : ", testNb++, 1); - CHECK( ZSTD_compressBegin(staticCCtx, 1) ); + CHECK_Z( ZSTD_compressBegin(staticCCtx, 1) ); DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : use CStream on CCtx-sized static context (should fail) : ", testNb++); @@ -1622,6 +1791,94 @@ static int basicUnitTests(U32 const seed, double compressibility) if (!ZSTD_isError(r)) goto _output_error; } DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : test estimation functions with default cctx params : ", testNb++); + { + // Test ZSTD_estimateCCtxSize_usingCCtxParams + { + ZSTD_CCtx_params* params = ZSTD_createCCtxParams(); + size_t const cctxSizeDefault = ZSTD_estimateCCtxSize_usingCCtxParams(params); + staticCCtx = ZSTD_initStaticCCtx(staticCCtxBuffer, 
cctxSizeDefault); + CHECK_VAR(cSize, ZSTD_compressCCtx(staticCCtx, + compressedBuffer, compressedBufferSize, + CNBuffer, CNBuffSize, 3)); + + { + size_t const r = ZSTD_decompressDCtx(staticDCtx, + decodedBuffer, CNBuffSize, + compressedBuffer, cSize); + if (r != CNBuffSize) goto _output_error; + if (memcmp(decodedBuffer, CNBuffer, CNBuffSize)) goto _output_error; + } + ZSTD_freeCCtxParams(params); + } + + // Test ZSTD_estimateCStreamSize_usingCCtxParams + { + ZSTD_CCtx_params* params = ZSTD_createCCtxParams(); + size_t const cctxSizeDefault = ZSTD_estimateCStreamSize_usingCCtxParams(params); + staticCCtx = ZSTD_initStaticCCtx(staticCCtxBuffer, cctxSizeDefault); + CHECK_VAR(cSize, ZSTD_compressCCtx(staticCCtx, + compressedBuffer, compressedBufferSize, + CNBuffer, CNBuffSize, 3) ); + + { + size_t const r = ZSTD_decompressDCtx(staticDCtx, + decodedBuffer, CNBuffSize, + compressedBuffer, cSize); + if (r != CNBuffSize) goto _output_error; + if (memcmp(decodedBuffer, CNBuffer, CNBuffSize)) goto _output_error; + } + ZSTD_freeCCtxParams(params); + } + } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : test estimation functions with maxBlockSize = 0 : ", testNb++); + { + // Test ZSTD_estimateCCtxSize_usingCCtxParams + { + ZSTD_CCtx_params* params = ZSTD_createCCtxParams(); + size_t cctxSizeDefault; + CHECK_Z(ZSTD_CCtxParams_setParameter(params, ZSTD_c_maxBlockSize, 0)); + cctxSizeDefault = ZSTD_estimateCCtxSize_usingCCtxParams(params); + staticCCtx = ZSTD_initStaticCCtx(staticCCtxBuffer, cctxSizeDefault); + CHECK_VAR(cSize, ZSTD_compressCCtx(staticCCtx, + compressedBuffer, compressedBufferSize, + CNBuffer, CNBuffSize, 3) ); + + { + size_t const r = ZSTD_decompressDCtx(staticDCtx, + decodedBuffer, CNBuffSize, + compressedBuffer, cSize); + if (r != CNBuffSize) goto _output_error; + if (memcmp(decodedBuffer, CNBuffer, CNBuffSize)) goto _output_error; + } + ZSTD_freeCCtxParams(params); + } + + // Test ZSTD_estimateCStreamSize_usingCCtxParams + { + ZSTD_CCtx_params* params = ZSTD_createCCtxParams(); + size_t cctxSizeDefault; + CHECK_Z(ZSTD_CCtxParams_setParameter(params, ZSTD_c_maxBlockSize, 0)); + cctxSizeDefault = ZSTD_estimateCStreamSize_usingCCtxParams(params); + staticCCtx = ZSTD_initStaticCCtx(staticCCtxBuffer, cctxSizeDefault); + CHECK_VAR(cSize, ZSTD_compressCCtx(staticCCtx, + compressedBuffer, compressedBufferSize, + CNBuffer, CNBuffSize, 3) ); + + { + size_t const r = ZSTD_decompressDCtx(staticDCtx, + decodedBuffer, CNBuffSize, + compressedBuffer, cSize); + if (r != CNBuffSize) goto _output_error; + if (memcmp(decodedBuffer, CNBuffer, CNBuffSize)) goto _output_error; + } + ZSTD_freeCCtxParams(params); + } + } + DISPLAYLEVEL(3, "OK \n"); } free(staticCCtxBuffer); free(staticDCtxBuffer); @@ -1647,8 +1904,8 @@ static int basicUnitTests(U32 const seed, double compressibility) testResult = 1; goto _end; } - CHECK( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_nbWorkers, 2) ); - CHECK( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_compressionLevel, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_nbWorkers, 2) ); + CHECK_Z( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_compressionLevel, 1) ); DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3u : compress %u bytes with 2 threads : ", testNb++, (unsigned)CNBuffSize); @@ -1678,9 +1935,9 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : compress -T2 with checksum : ", testNb++); - CHECK( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_checksumFlag, 1) ); - CHECK( ZSTD_CCtx_setParameter(mtctx, 
ZSTD_c_contentSizeFlag, 1) ); - CHECK( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_overlapLog, 3) ); + CHECK_Z( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_checksumFlag, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_contentSizeFlag, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(mtctx, ZSTD_c_overlapLog, 3) ); CHECK_VAR(cSize, ZSTD_compress2(mtctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize) ); @@ -1699,11 +1956,11 @@ static int basicUnitTests(U32 const seed, double compressibility) ZSTD_DCtx* const dctx = ZSTD_createDCtx(); char out[32]; if (cctx == NULL || dctx == NULL) goto _output_error; - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0) ); CHECK_VAR(cSize, ZSTD_compress2(cctx, out, sizeof(out), NULL, 0) ); DISPLAYLEVEL(3, "OK (%u bytes)\n", (unsigned)cSize); - CHECK( ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 10) ); + CHECK_Z( ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 10) ); { char const* outPtr = out; ZSTD_inBuffer inBuffer = { outPtr, cSize, 0 }; ZSTD_outBuffer outBuffer = { NULL, 0, 0 }; @@ -1718,9 +1975,9 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : compress with block splitting : ", testNb++) { ZSTD_CCtx* cctx = ZSTD_createCCtx(); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable) ); cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); - CHECK(cSize); + CHECK_Z(cSize); ZSTD_freeCCtx(cctx); } DISPLAYLEVEL(3, "OK \n"); @@ -1728,13 +1985,13 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : compress -T2 with/without literals compression : ", testNb++) { ZSTD_CCtx* cctx = ZSTD_createCCtx(); size_t cSize1, cSize2; - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 1) ); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2) ); cSize1 = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); - CHECK(cSize1); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_ps_disable) ); + CHECK_Z(cSize1); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_ps_disable) ); cSize2 = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); - CHECK(cSize2); + CHECK_Z(cSize2); CHECK_LT(cSize1, cSize2); ZSTD_freeCCtx(cctx); } @@ -1745,10 +2002,10 @@ static int basicUnitTests(U32 const seed, double compressibility) /* Set rsyncable and don't give the ZSTD_compressBound(CNBuffSize) so * ZSTDMT is forced to not take the shortcut. 
*/ - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 1) ); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 1) ); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1) ); - CHECK( ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize - 1, CNBuffer, CNBuffSize) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1) ); + CHECK_Z( ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize - 1, CNBuffer, CNBuffSize) ); ZSTD_freeCCtx(cctx); } DISPLAYLEVEL(3, "OK \n"); @@ -1758,22 +2015,22 @@ static int basicUnitTests(U32 const seed, double compressibility) int const jobSize = 512 KB; int value; /* Check that the overlap log and job size are unset. */ - CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) ); + CHECK_Z( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) ); CHECK_EQ(value, 0); - CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) ); + CHECK_Z( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) ); CHECK_EQ(value, 0); /* Set and check the overlap log and job size. */ - CHECK( ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, 5) ); - CHECK( ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, jobSize) ); - CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) ); + CHECK_Z( ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, 5) ); + CHECK_Z( ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, jobSize) ); + CHECK_Z( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) ); CHECK_EQ(value, 5); - CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) ); + CHECK_Z( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) ); CHECK_EQ(value, jobSize); /* Set the number of workers and check the overlap log and job size. 
*/ - CHECK( ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, 2) ); - CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) ); + CHECK_Z( ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, 2) ); + CHECK_Z( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) ); CHECK_EQ(value, 5); - CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) ); + CHECK_Z( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) ); CHECK_EQ(value, jobSize); ZSTD_freeCCtxParams(params); } @@ -1884,8 +2141,8 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : load dictionary into context : ", testNb++); - CHECK( ZSTD_compressBegin_usingDict(ctxOrig, CNBuffer, dictSize, 2) ); - CHECK( ZSTD_copyCCtx(ctxDuplicated, ctxOrig, 0) ); /* Begin_usingDict implies unknown srcSize, so match that */ + CHECK_Z( ZSTD_compressBegin_usingDict(ctxOrig, CNBuffer, dictSize, 2) ); + CHECK_Z( ZSTD_copyCCtx(ctxDuplicated, ctxOrig, 0) ); /* Begin_usingDict implies unknown srcSize, so match that */ DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : compress with flat dictionary : ", testNb++); @@ -1945,8 +2202,8 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : check content size on duplicated context : ", testNb++); { size_t const testSize = CNBuffSize / 3; - CHECK( ZSTD_compressBegin(ctxOrig, ZSTD_defaultCLevel()) ); - CHECK( ZSTD_copyCCtx(ctxDuplicated, ctxOrig, testSize) ); + CHECK_Z( ZSTD_compressBegin(ctxOrig, ZSTD_defaultCLevel()) ); + CHECK_Z( ZSTD_copyCCtx(ctxDuplicated, ctxOrig, testSize) ); CHECK_VAR(cSize, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, ZSTD_compressBound(testSize), (const char*)CNBuffer + dictSize, testSize) ); @@ -2041,6 +2298,7 @@ static int basicUnitTests(U32 const seed, double compressibility) CHECK_Z( ZSTD_CCtx_setParameter(ctxOrig, ZSTD_c_compressionLevel, l) ); CHECK_Z( ZSTD_CCtx_setParameter(ctxOrig, ZSTD_c_enableDedicatedDictSearch, 0) ); CHECK_Z( ZSTD_CCtx_setParameter(ctxOrig, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach) ); + CHECK_Z( ZSTD_CCtx_setParameter(ctxOrig, ZSTD_c_prefetchCDictTables, seed % 3) ); wdict_cSize = ZSTD_compress2(ctxOrig, compressedBuffer, compressedBufferSize, contentStart, contentSize); if (wdict_cSize > target_wdict_cSize[l]) { DISPLAYLEVEL(1, "error : compression with dictionary and compress2 at level %i worse than expected (%u > %u) \n", @@ -2345,6 +2603,27 @@ static int basicUnitTests(U32 const seed, double compressibility) } DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3d : bufferless api with cdict : ", testNb++); + { ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 1); + ZSTD_DCtx* const dctx = ZSTD_createDCtx(); + ZSTD_frameParameters const fParams = { 0, 1, 0 }; + size_t cBlockSize; + cSize = 0; + CHECK_Z(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN)); + cBlockSize = ZSTD_compressContinue(cctx, (char*)compressedBuffer + cSize, compressedBufferSize - cSize, CNBuffer, 1000); + CHECK_Z(cBlockSize); + cSize += cBlockSize; + cBlockSize = ZSTD_compressEnd(cctx, (char*)compressedBuffer + cSize, compressedBufferSize - cSize, (char const*)CNBuffer + 2000, 1000); + CHECK_Z(cBlockSize); + cSize += cBlockSize; + + CHECK_Z(ZSTD_decompress_usingDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, dictBuffer, dictSize)); + + ZSTD_freeCDict(cdict); + ZSTD_freeDCtx(dctx); + } + DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : Building 
cdict w/ ZSTD_dct_fullDict on a good dictionary : ", testNb++); { ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize); ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_fullDict, cParams, ZSTD_defaultCMem); @@ -2585,6 +2864,90 @@ static int basicUnitTests(U32 const seed, double compressibility) } DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : ZSTD_fast attach dictionary with hashLog = 25 and chainLog = 25 : ", testNb++); + { + ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams(); + ZSTD_customMem customMem = {NULL, NULL, NULL}; + ZSTD_DCtx* dctx = ZSTD_createDCtx(); + ZSTD_CDict* cdict; + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_strategy, ZSTD_fast)); + /* Set windowLog to 25 so hash/chain logs don't get sized down */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_windowLog, 25)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_hashLog, 25)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_chainLog, 25)); + /* Set srcSizeHint to 2^25 so hash/chain logs don't get sized down */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_srcSizeHint, 1u << 25)); + cdict = ZSTD_createCDict_advanced2(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, cctxParams, customMem); + CHECK_Z(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)); + CHECK_Z(ZSTD_CCtx_refCDict(cctx, cdict)); + cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); + CHECK_Z(cSize); + CHECK_Z(ZSTD_decompress_usingDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, dictBuffer, dictSize)); + ZSTD_freeCDict(cdict); + ZSTD_freeDCtx(dctx); + ZSTD_freeCCtxParams(cctxParams); + } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : ZSTD_dfast attach dictionary with hashLog = 25 and chainLog = 25 : ", testNb++); + { + ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams(); + ZSTD_customMem customMem = {NULL, NULL, NULL}; + ZSTD_DCtx* dctx = ZSTD_createDCtx(); + ZSTD_CDict* cdict; + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_strategy, ZSTD_dfast)); + /* Set windowLog to 25 so hash/chain logs don't get sized down */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_windowLog, 25)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_hashLog, 25)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_chainLog, 25)); + /* Set srcSizeHint to 2^25 so hash/chain logs don't get sized down */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_srcSizeHint, 1u << 25)); + cdict = ZSTD_createCDict_advanced2(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, cctxParams, customMem); + CHECK_Z(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)); + CHECK_Z(ZSTD_CCtx_refCDict(cctx, cdict)); + cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); + CHECK_Z(cSize); + CHECK_Z(ZSTD_decompress_usingDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, dictBuffer, dictSize)); + ZSTD_freeCDict(cdict); + ZSTD_freeDCtx(dctx); + ZSTD_freeCCtxParams(cctxParams); + } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : ZSTD_lazy attach dictionary with hashLog = 29 and 
searchLog = 4 : ", testNb++); + if (MEM_64bits()) { + ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams(); + ZSTD_customMem customMem = {NULL, NULL, NULL}; + ZSTD_DCtx* dctx = ZSTD_createDCtx(); + ZSTD_CDict* cdict; + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_strategy, ZSTD_lazy)); + /* Force enable row based match finder, and disable dedicated dict search. */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_enableDedicatedDictSearch, 0)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_searchLog, 4)); + /* Set windowLog to 29 so hash/chain logs don't get sized down */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_windowLog, 29)); + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_hashLog, 29)); + /* Set srcSizeHint to 2^29 so hash/chain logs don't get sized down */ + CHECK_Z(ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_srcSizeHint, 1u << 29)); + cdict = ZSTD_createCDict_advanced2(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, cctxParams, customMem); + CHECK_Z(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)); + CHECK_Z(ZSTD_CCtx_refCDict(cctx, cdict)); + cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); + CHECK_Z(cSize); + CHECK_Z(ZSTD_decompress_usingDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, dictBuffer, dictSize)); + ZSTD_freeCDict(cdict); + ZSTD_freeDCtx(dctx); + ZSTD_freeCCtxParams(cctxParams); + } + DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : Dictionary with non-default repcodes : ", testNb++); { U32 u; for (u=0; u %u bytes)\n", (unsigned)inputSize, (unsigned)result); @@ -2855,7 +3218,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : get dParameter bounds ", testNb++); { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); - CHECK(bounds.error); + CHECK_Z(bounds.error); } DISPLAYLEVEL(3, "OK \n"); @@ -2889,7 +3252,7 @@ static int basicUnitTests(U32 const seed, double compressibility) /* basic block compression */ DISPLAYLEVEL(3, "test%3i : magic-less format test : ", testNb++); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless) ); { ZSTD_inBuffer in = { CNBuffer, inputSize, 0 }; ZSTD_outBuffer out = { compressedBuffer, ZSTD_compressBound(inputSize), 0 }; size_t const result = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end); @@ -2907,7 +3270,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : decompress of magic-less frame : ", testNb++); ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); - CHECK( ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless) ); + CHECK_Z( ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless) ); { ZSTD_frameHeader zfh; size_t const zfhrt = ZSTD_getFrameHeader_advanced(&zfh, compressedBuffer, cSize, ZSTD_f_zstd1_magicless); if (zfhrt != 0) goto _output_error; @@ -2929,7 +3292,7 @@ static int basicUnitTests(U32 const seed, double compressibility) /* basic block compression */ DISPLAYLEVEL(3, "test%3i : empty magic-less format test : ", testNb++); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless) ); 
+ CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless) ); { ZSTD_inBuffer in = { CNBuffer, 0, 0 }; ZSTD_outBuffer out = { compressedBuffer, ZSTD_compressBound(0), 0 }; size_t const result = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end); @@ -2941,7 +3304,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : decompress of empty magic-less frame : ", testNb++); ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); - CHECK( ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless) ); + CHECK_Z( ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless) ); /* one shot */ { size_t const result = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize); if (result != 0) goto _output_error; @@ -2976,13 +3339,13 @@ static int basicUnitTests(U32 const seed, double compressibility) int check; if (ZSTD_isError(bounds.error)) continue; - CHECK(ZSTD_DCtx_getParameter(dctx, dParam, &value1)); + CHECK_Z(ZSTD_DCtx_getParameter(dctx, dParam, &value1)); value2 = (value1 != bounds.lowerBound) ? bounds.lowerBound : bounds.upperBound; - CHECK(ZSTD_DCtx_setParameter(dctx, dParam, value2)); - CHECK(ZSTD_DCtx_getParameter(dctx, dParam, &check)); + CHECK_Z(ZSTD_DCtx_setParameter(dctx, dParam, value2)); + CHECK_Z(ZSTD_DCtx_getParameter(dctx, dParam, &check)); if (check != value2) goto _output_error; - CHECK(ZSTD_DCtx_reset(dctx, ZSTD_reset_parameters)); - CHECK(ZSTD_DCtx_getParameter(dctx, dParam, &check)); + CHECK_Z(ZSTD_DCtx_reset(dctx, ZSTD_reset_parameters)); + CHECK_Z(ZSTD_DCtx_getParameter(dctx, dParam, &check)); if (check != value1) goto _output_error; } ZSTD_freeDCtx(dctx); @@ -2999,21 +3362,21 @@ static int basicUnitTests(U32 const seed, double compressibility) /* basic block compression */ DISPLAYLEVEL(3, "test%3i : Block compression test : ", testNb++); - CHECK( ZSTD_compressBegin(cctx, 5) ); - CHECK( ZSTD_getBlockSize(cctx) >= blockSize); + CHECK_Z( ZSTD_compressBegin(cctx, 5) ); + CHECK_Z( ZSTD_getBlockSize(cctx) >= blockSize); CHECK_VAR(cSize, ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), CNBuffer, blockSize) ); DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : Block decompression test : ", testNb++); - CHECK( ZSTD_decompressBegin(dctx) ); + CHECK_Z( ZSTD_decompressBegin(dctx) ); { CHECK_NEWV(r, ZSTD_decompressBlock(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) ); if (r != blockSize) goto _output_error; } DISPLAYLEVEL(3, "OK \n"); /* very long stream of block compression */ DISPLAYLEVEL(3, "test%3i : Huge block streaming compression test : ", testNb++); - CHECK( ZSTD_compressBegin(cctx, -199) ); /* we just want to quickly overflow internal U32 index */ - CHECK( ZSTD_getBlockSize(cctx) >= blockSize); + CHECK_Z( ZSTD_compressBegin(cctx, -199) ); /* we just want to quickly overflow internal U32 index */ + CHECK_Z( ZSTD_getBlockSize(cctx) >= blockSize); { U64 const toCompress = 5000000000ULL; /* > 4 GB */ U64 compressed = 0; while (compressed < toCompress) { @@ -3026,7 +3389,7 @@ static int basicUnitTests(U32 const seed, double compressibility) /* dictionary block compression */ DISPLAYLEVEL(3, "test%3i : Dictionary Block compression test : ", testNb++); - CHECK( ZSTD_compressBegin_usingDict(cctx, CNBuffer, dictSize, 5) ); + CHECK_Z( ZSTD_compressBegin_usingDict(cctx, CNBuffer, dictSize, 5) ); CHECK_VAR(cSize, ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize, blockSize)); 
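
As an aside for readers unfamiliar with the calls exercised in these block tests: ZSTD_compressBegin_usingDict(), ZSTD_getBlockSize(), ZSTD_compressBlock() and ZSTD_decompressBlock() belong to zstd's experimental block-level API, exposed only when ZSTD_STATIC_LINKING_ONLY is defined before including zstd.h. The sketch below is not part of this patch; blockRoundTrip() is a hypothetical helper, the compression level is an arbitrary choice, and it assumes the source fits in a single block.

    /* Minimal sketch of a single-block round trip; error handling collapsed to 0/1. */
    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <string.h>

    static int blockRoundTrip(const void* dict, size_t dictSize,
                              const void* src, size_t srcSize,
                              void* cBuf, size_t cBufCapacity,
                              void* dBuf, size_t dBufCapacity)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        int ok = 0;
        if (cctx != NULL && dctx != NULL
            && !ZSTD_isError(ZSTD_compressBegin_usingDict(cctx, dict, dictSize, 3))
            && srcSize <= ZSTD_getBlockSize(cctx)) {
            size_t const cSize = ZSTD_compressBlock(cctx, cBuf, cBufCapacity, src, srcSize);
            /* A 0 return means "not compressible": a real caller must then store
             * the block uncompressed instead of treating it as an error. */
            if (!ZSTD_isError(cSize) && cSize > 0
                && !ZSTD_isError(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize))) {
                size_t const rSize = ZSTD_decompressBlock(dctx, dBuf, dBufCapacity, cBuf, cSize);
                ok = !ZSTD_isError(rSize) && (rSize == srcSize)
                     && (memcmp(dBuf, src, srcSize) == 0);
            }
        }
        ZSTD_freeCCtx(cctx);
        ZSTD_freeDCtx(dctx);
        return ok;
    }

Blocks carry no frame header, so the caller is responsible for recording block sizes and boundaries itself, which is why the tests track cSize explicitly between calls.
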
RDG_genBuffer((char*)CNBuffer+dictSize+blockSize, blockSize, 0.0, 0.0, seed); /* create a non-compressible second block */ { CHECK_NEWV(r, ZSTD_compressBlock(cctx, (char*)compressedBuffer+cSize, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize+blockSize, blockSize) ); /* for cctx history consistency */ @@ -3037,7 +3400,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : Dictionary Block decompression test : ", testNb++); - CHECK( ZSTD_decompressBegin_usingDict(dctx, CNBuffer, dictSize) ); + CHECK_Z( ZSTD_decompressBegin_usingDict(dctx, CNBuffer, dictSize) ); { CHECK_NEWV( r, ZSTD_decompressBlock(dctx, decodedBuffer, blockSize, compressedBuffer, cSize) ); if (r != blockSize) { DISPLAYLEVEL(1, "ZSTD_decompressBlock() with _usingDict() fails : %u, instead of %u expected \n", (unsigned)r, (unsigned)blockSize); @@ -3056,8 +3419,8 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : Block compression with CDict : ", testNb++); { ZSTD_CDict* const cdict = ZSTD_createCDict(CNBuffer, dictSize, 3); if (cdict==NULL) goto _output_error; - CHECK( ZSTD_compressBegin_usingCDict(cctx, cdict) ); - CHECK( ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize, blockSize) ); + CHECK_Z( ZSTD_compressBegin_usingCDict(cctx, cdict) ); + CHECK_Z( ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize, blockSize) ); ZSTD_freeCDict(cdict); } DISPLAYLEVEL(3, "OK \n"); @@ -3114,18 +3477,17 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : ZSTD_getSequences followed by ZSTD_compressSequences : ", testNb++); { - size_t srcSize = 500 KB; - BYTE* src = (BYTE*)CNBuffer; - BYTE* dst = (BYTE*)compressedBuffer; - size_t dstSize = ZSTD_compressBound(srcSize); - size_t decompressSize = srcSize; - char* decompressBuffer = (char*)malloc(decompressSize); + const size_t srcSize = 500 KB; + const BYTE* const src = (BYTE*)CNBuffer; + BYTE* const dst = (BYTE*)compressedBuffer; + const size_t dstCapacity = ZSTD_compressBound(srcSize); + const size_t decompressSize = srcSize; + char* const decompressBuffer = (char*)malloc(decompressSize); size_t compressedSize; - size_t dSize; - ZSTD_CCtx* cctx = ZSTD_createCCtx(); - ZSTD_Sequence* seqs = (ZSTD_Sequence*)malloc(srcSize * sizeof(ZSTD_Sequence)); - size_t seqsSize; + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); + ZSTD_Sequence* const seqs = (ZSTD_Sequence*)malloc(srcSize * sizeof(ZSTD_Sequence)); + size_t nbSeqs; if (seqs == NULL) goto _output_error; assert(cctx != NULL); @@ -3133,36 +3495,37 @@ static int basicUnitTests(U32 const seed, double compressibility) /* Populate src with random data */ RDG_genBuffer(CNBuffer, srcSize, compressibility, 0., seed); - /* Test with block delimiters roundtrip */ - seqsSize = ZSTD_generateSequences(cctx, seqs, srcSize, src, srcSize); + /* Roundtrip Test with block delimiters generated by ZSTD_generateSequences() */ + nbSeqs = ZSTD_generateSequences(cctx, seqs, srcSize, src, srcSize); ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters); - compressedSize = ZSTD_compressSequences(cctx, dst, dstSize, seqs, seqsSize, src, srcSize); + compressedSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize); if (ZSTD_isError(compressedSize)) { DISPLAY("Error in sequence compression with block 
delims\n"); goto _output_error; } - dSize = ZSTD_decompress(decompressBuffer, decompressSize, dst, compressedSize); - if (ZSTD_isError(dSize)) { - DISPLAY("Error in sequence compression roundtrip with block delims\n"); - goto _output_error; - } + { size_t const dSize = ZSTD_decompress(decompressBuffer, decompressSize, dst, compressedSize); + if (ZSTD_isError(dSize)) { + DISPLAY("Error in sequence compression roundtrip with block delims\n"); + goto _output_error; + } } assert(!memcmp(decompressBuffer, src, srcSize)); - /* Test with no block delimiters roundtrip */ - seqsSize = ZSTD_mergeBlockDelimiters(seqs, seqsSize); - ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters); - compressedSize = ZSTD_compressSequences(cctx, dst, dstSize, seqs, seqsSize, src, srcSize); + /* Roundtrip Test with no block delimiters */ + { size_t const nbSeqsAfterMerge = ZSTD_mergeBlockDelimiters(seqs, nbSeqs); + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters); + compressedSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqsAfterMerge, src, srcSize); + } if (ZSTD_isError(compressedSize)) { DISPLAY("Error in sequence compression with no block delims\n"); goto _output_error; } - dSize = ZSTD_decompress(decompressBuffer, decompressSize, dst, compressedSize); - if (ZSTD_isError(dSize)) { - DISPLAY("Error in sequence compression roundtrip with no block delims\n"); - goto _output_error; - } + { size_t const dSize = ZSTD_decompress(decompressBuffer, decompressSize, dst, compressedSize); + if (ZSTD_isError(dSize)) { + DISPLAY("Error in sequence compression roundtrip with no block delims\n"); + goto _output_error; + } } assert(!memcmp(decompressBuffer, src, srcSize)); ZSTD_freeCCtx(cctx); @@ -3226,7 +3589,7 @@ static int basicUnitTests(U32 const seed, double compressibility) size_t const bound = ZSTD_compressBound(_3BYTESTESTLENGTH); size_t nbSeq = 1; while (nbSeq <= maxNbSeq) { - CHECK(ZSTD_compressCCtx(cctx, compressedBuffer, bound, CNBuffer, nbSeq * 3, 19)); + CHECK_Z(ZSTD_compressCCtx(cctx, compressedBuffer, bound, CNBuffer, nbSeq * 3, 19)); /* Check every sequence for the first 100, then skip more rapidly. */ if (nbSeq < 100) { ++nbSeq; @@ -3255,7 +3618,7 @@ static int basicUnitTests(U32 const seed, double compressibility) size_t const bound = ZSTD_compressBound(CNBuffSize); size_t size = 1; while (size <= CNBuffSize) { - CHECK(ZSTD_compressCCtx(cctx, compressedBuffer, bound, CNBuffer, size, 3)); + CHECK_Z(ZSTD_compressCCtx(cctx, compressedBuffer, bound, CNBuffer, size, 3)); /* Check every size for the first 100, then skip more rapidly. 
*/ if (size < 100) { ++size; @@ -3283,15 +3646,14 @@ static int basicUnitTests(U32 const seed, double compressibility) { U32 u; for (u = 0; u < CNBuffSize; ++u) { ((BYTE*)CNBuffer)[u] = 255 - ((BYTE*)CNBuffer)[u]; - } - } + } } { /* Compress the data */ size_t const inputSize = 500; size_t const outputSize = ZSTD_compressBound(inputSize); void* const outputBuffer = malloc(outputSize); ZSTD_CCtx* const cctx = ZSTD_createCCtx(); if (!outputBuffer || !cctx) goto _output_error; - CHECK(ZSTD_compress_usingDict(cctx, outputBuffer, outputSize, CNBuffer, inputSize, dictBuffer, dictSize, 1)); + CHECK_Z(ZSTD_compress_usingDict(cctx, outputBuffer, outputSize, CNBuffer, inputSize, dictBuffer, dictSize, 1)); free(outputBuffer); ZSTD_freeCCtx(cctx); } @@ -3376,6 +3738,74 @@ static int basicUnitTests(U32 const seed, double compressibility) } DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : testing bitwise intrinsics PR#3045: ", testNb++); + { + U32 seed_copy = seed; /* need non-const seed to avoid compiler warning for FUZ_rand(&seed) */ + U32 rand32 = FUZ_rand(&seed_copy); + U64 rand64 = ((U64)FUZ_rand(&seed_copy) << 32) | FUZ_rand(&seed_copy); + U32 lowbit_only_32 = 1; + U64 lowbit_only_64 = 1; + U32 highbit_only_32 = (U32)1 << 31; + U64 highbit_only_64 = (U64)1 << 63; + U32 i; + if (rand32 == 0) rand32 = 1; /* CLZ and CTZ are undefined on 0 */ + if (rand64 == 0) rand64 = 1; /* CLZ and CTZ are undefined on 0 */ + + /* Test ZSTD_countTrailingZeros32 */ + CHECK_EQ(ZSTD_countTrailingZeros32(lowbit_only_32), 0u); + CHECK_EQ(ZSTD_countTrailingZeros32(highbit_only_32), 31u); + CHECK_EQ(ZSTD_countTrailingZeros32(rand32), ZSTD_countTrailingZeros32_fallback(rand32)); + + /* Test ZSTD_countLeadingZeros32 */ + CHECK_EQ(ZSTD_countLeadingZeros32(lowbit_only_32), 31u); + CHECK_EQ(ZSTD_countLeadingZeros32(highbit_only_32), 0u); + CHECK_EQ(ZSTD_countLeadingZeros32(rand32), ZSTD_countLeadingZeros32_fallback(rand32)); + + /* Test ZSTD_countTrailingZeros64 */ + CHECK_EQ(ZSTD_countTrailingZeros64(lowbit_only_64), 0u); + CHECK_EQ(ZSTD_countTrailingZeros64(highbit_only_64), 63u); + + /* Test ZSTD_countLeadingZeros64 */ + CHECK_EQ(ZSTD_countLeadingZeros64(lowbit_only_64), 63u); + CHECK_EQ(ZSTD_countLeadingZeros64(highbit_only_64), 0u); + + /* Test ZSTD_highbit32 */ + CHECK_EQ(ZSTD_highbit32(lowbit_only_32), 0u); + CHECK_EQ(ZSTD_highbit32(highbit_only_32), 31u); + + /* Test ZSTD_NbCommonBytes */ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { + CHECK_EQ(ZSTD_NbCommonBytes(lowbit_only_32), 0u); + CHECK_EQ(ZSTD_NbCommonBytes(highbit_only_32), 3u); + } else { + CHECK_EQ(ZSTD_NbCommonBytes(lowbit_only_32), 0u); + CHECK_EQ(ZSTD_NbCommonBytes(highbit_only_32), 3u); + } + } else { + if (MEM_64bits()) { + CHECK_EQ(ZSTD_NbCommonBytes(lowbit_only_32), 7u); + CHECK_EQ(ZSTD_NbCommonBytes(highbit_only_32), 4u); + } else { + CHECK_EQ(ZSTD_NbCommonBytes(lowbit_only_32), 3u); + CHECK_EQ(ZSTD_NbCommonBytes(highbit_only_32), 0u); + } + } + + /* Test MEM_ intrinsics */ + CHECK_EQ(MEM_swap32(rand32), MEM_swap32_fallback(rand32)); + CHECK_EQ(MEM_swap64(rand64), MEM_swap64_fallback(rand64)); + + /* Test fallbacks vs intrinsics on a range of small integers */ + for (i=1; i <= 1000; i++) { + CHECK_EQ(MEM_swap32(i), MEM_swap32_fallback(i)); + CHECK_EQ(MEM_swap64((U64)i), MEM_swap64_fallback((U64)i)); + CHECK_EQ(ZSTD_countTrailingZeros32(i), ZSTD_countTrailingZeros32_fallback(i)); + CHECK_EQ(ZSTD_countLeadingZeros32(i), ZSTD_countLeadingZeros32_fallback(i)); + } + } + DISPLAYLEVEL(3, "OK \n"); + #ifdef ZSTD_MULTITHREAD DISPLAYLEVEL(3, 
"test%3i : passing wrong full dict should fail on compressStream2 refPrefix ", testNb++); { ZSTD_CCtx* cctx = ZSTD_createCCtx(); @@ -3424,7 +3854,7 @@ static int basicUnitTests(U32 const seed, double compressibility) /* Enable MT, LDM, and use refPrefix() for a small dict */ CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2)); - CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, ZSTD_ps_enable)); CHECK_Z(ZSTD_CCtx_refPrefix(cctx, dict, dictSize)); CHECK_Z(ZSTD_compress2(cctx, dst, dstSize, src, srcSize)); @@ -3560,7 +3990,7 @@ static int longUnitTests(U32 const seed, double compressibility) while (approxIndex <= (maxIndex / 4) * 3) { CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush)); approxIndex += in.pos; - CHECK(in.pos == in.size); + CHECK_Z(in.pos == in.size); in.pos = 0; out.pos = 0; } @@ -3586,7 +4016,7 @@ static int longUnitTests(U32 const seed, double compressibility) while (approxIndex <= maxIndex) { CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush)); approxIndex += in.pos; - CHECK(in.pos == in.size); + CHECK_Z(in.pos == in.size); in.pos = 0; out.pos = 0; } @@ -3618,7 +4048,7 @@ static int longUnitTests(U32 const seed, double compressibility) /* Enable checksum to verify round trip. */ CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)); - CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, ZSTD_ps_enable)); CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19)); /* Round trip once with ldm. */ @@ -3628,7 +4058,7 @@ static int longUnitTests(U32 const seed, double compressibility) ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)); - CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 0)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, ZSTD_ps_disable)); CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19)); /* Round trip once without ldm. */ @@ -3669,7 +4099,7 @@ static int longUnitTests(U32 const seed, double compressibility) RDG_genBuffer(dict, dictSize, 0.5, 0.5, seed); RDG_genBuffer(CNBuffer, CNBuffSize, 0.6, 0.6, seed); - CHECK(cctx_params != NULL); + CHECK_Z(cctx_params != NULL); for (dictSize = CNBuffSize; dictSize; dictSize = dictSize >> 3) { DISPLAYLEVEL(3, "\n Testing with dictSize %u ", (U32)dictSize); @@ -3712,11 +4142,6 @@ static int longUnitTests(U32 const seed, double compressibility) free(compressedBuffer); free(decodedBuffer); return testResult; - -_output_error: - testResult = 1; - DISPLAY("Error detected in Unit tests ! 
\n"); - goto _end; } @@ -3969,9 +4394,9 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const DISPLAYLEVEL(5, "fuzzer t%u: Bufferless streaming compression test \n", testNb); { U32 const testLog = FUZ_rand(&lseed) % maxSrcLog; U32 const dictLog = FUZ_rand(&lseed) % maxSrcLog; - int const cLevel = (FUZ_rand(&lseed) % - (ZSTD_maxCLevel() - - (MAX(testLog, dictLog) / cLevelLimiter))) + + int const cLevel = (int)(FUZ_rand(&lseed) % + ((U32)ZSTD_maxCLevel() - + (MAX(testLog, dictLog) / (U32)cLevelLimiter))) + 1; maxTestSize = FUZ_rLogLength(&lseed, testLog); if (maxTestSize >= dstBufferSize) maxTestSize = dstBufferSize-1; @@ -4067,7 +4492,7 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const free(cBuffer); free(dstBuffer); free(mirrorBuffer); - return result; + return (int)result; _output_error: result = 1; @@ -4104,7 +4529,7 @@ static unsigned readU32FromChar(const char** stringPtr) { unsigned result = 0; while ((**stringPtr >='0') && (**stringPtr <='9')) - result *= 10, result += **stringPtr - '0', (*stringPtr)++ ; + result *= 10, result += (unsigned)(**stringPtr - '0'), (*stringPtr)++ ; if ((**stringPtr=='K') || (**stringPtr=='M')) { result <<= 10; if (**stringPtr=='M') result <<= 10; @@ -4246,7 +4671,7 @@ int main(int argc, const char** argv) } } if (!result) - result = fuzzerTests(seed, nbTests, testNb, maxDuration, ((double)proba) / 100, bigTests); + result = fuzzerTests(seed, (unsigned)nbTests, (unsigned)testNb, maxDuration, ((double)proba) / 100, bigTests); if (mainPause) { int unused; DISPLAY("Press Enter \n"); diff --git a/tests/golden-compression/large-literal-and-match-lengths b/tests/golden-compression/large-literal-and-match-lengths new file mode 100644 index 00000000000..fb63c32c50f Binary files /dev/null and b/tests/golden-compression/large-literal-and-match-lengths differ diff --git a/tests/golden-decompression/empty-block.zst b/tests/golden-decompression/empty-block.zst new file mode 100644 index 00000000000..fbfb893e11e Binary files /dev/null and b/tests/golden-decompression/empty-block.zst differ diff --git a/tests/gzip/Makefile b/tests/gzip/Makefile index a50350f8d3f..cca3109670f 100644 --- a/tests/gzip/Makefile +++ b/tests/gzip/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/tests/gzip/gzip-env.sh b/tests/gzip/gzip-env.sh index 120e52d78d4..4570e81c377 100755 --- a/tests/gzip/gzip-env.sh +++ b/tests/gzip/gzip-env.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/helin-segv.sh b/tests/gzip/helin-segv.sh index f182c8066f3..b400c24459b 100644 --- a/tests/gzip/helin-segv.sh +++ b/tests/gzip/helin-segv.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . 
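
For the readU32FromChar() hunk in the fuzzer.c portion above, the change only adds an explicit cast when accumulating digits; for context, the stand-alone sketch below (a hypothetical parseU32WithSuffix(), not part of the patch, with overflow checks omitted) restates the digits-then-suffix convention the test tools use, where a trailing 'K' scales by 2^10 and 'M' by 2^20.

    /* Sketch of the K/M suffix convention: "64K" -> 64 << 10, "2M" -> 2 << 20. */
    static unsigned parseU32WithSuffix(const char** stringPtr)
    {
        unsigned result = 0;
        while ((**stringPtr >= '0') && (**stringPtr <= '9')) {
            result = result * 10 + (unsigned)(**stringPtr - '0');
            (*stringPtr)++;
        }
        if ((**stringPtr == 'K') || (**stringPtr == 'M')) {
            result <<= 10;                            /* xK -> x * 1024 */
            if (**stringPtr == 'M') result <<= 10;    /* xM -> x * 1024 * 1024 */
            (*stringPtr)++;
        }
        return result;
    }

    /* Example usage: const char* p = "512K"; parseU32WithSuffix(&p) == 524288. */
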
diff --git a/tests/gzip/help-version.sh b/tests/gzip/help-version.sh index ee0c19f7d1f..fcda1c3acd1 100644 --- a/tests/gzip/help-version.sh +++ b/tests/gzip/help-version.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # Ensure that $SHELL is set to *some* value and exported. # This is required for dircolors, which would fail e.g., when diff --git a/tests/gzip/hufts.sh b/tests/gzip/hufts.sh index 9b9576ce34e..49c36950141 100644 --- a/tests/gzip/hufts.sh +++ b/tests/gzip/hufts.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/init.sh b/tests/gzip/init.sh index 97e4e4ba5e6..ebd8410bef4 100644 --- a/tests/gzip/init.sh +++ b/tests/gzip/init.sh @@ -13,7 +13,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # Using this file in a test # ========================= diff --git a/tests/gzip/keep.sh b/tests/gzip/keep.sh index ab9a21811d3..f87b1a49bf2 100644 --- a/tests/gzip/keep.sh +++ b/tests/gzip/keep.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/list.sh b/tests/gzip/list.sh index 75912e1e26d..e218d7562df 100644 --- a/tests/gzip/list.sh +++ b/tests/gzip/list.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/memcpy-abuse.sh b/tests/gzip/memcpy-abuse.sh index 7d5c056debc..1478890cc5c 100644 --- a/tests/gzip/memcpy-abuse.sh +++ b/tests/gzip/memcpy-abuse.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/mixed.sh b/tests/gzip/mixed.sh index 383a54f5e46..b47f4a56a22 100644 --- a/tests/gzip/mixed.sh +++ b/tests/gzip/mixed.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/null-suffix-clobber.sh b/tests/gzip/null-suffix-clobber.sh index 0efd0e34490..5acfb321c36 100644 --- a/tests/gzip/null-suffix-clobber.sh +++ b/tests/gzip/null-suffix-clobber.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. 
If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/stdin.sh b/tests/gzip/stdin.sh index eef4cd8b107..d697ab80a9e 100644 --- a/tests/gzip/stdin.sh +++ b/tests/gzip/stdin.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/test-driver.sh b/tests/gzip/test-driver.sh index 649c084e4b0..0529cc8fbc6 100644 --- a/tests/gzip/test-driver.sh +++ b/tests/gzip/test-driver.sh @@ -16,7 +16,7 @@ scriptversion=2016-01-11.22; # UTC # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a diff --git a/tests/gzip/trailing-nul.sh b/tests/gzip/trailing-nul.sh index 7b15d5e5578..b33b98f78b5 100644 --- a/tests/gzip/trailing-nul.sh +++ b/tests/gzip/trailing-nul.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/unpack-invalid.sh b/tests/gzip/unpack-invalid.sh index fe8384d73cd..ceda5ffa45f 100644 --- a/tests/gzip/unpack-invalid.sh +++ b/tests/gzip/unpack-invalid.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/z-suffix.sh b/tests/gzip/z-suffix.sh index a870a5408de..c0bf509c99f 100644 --- a/tests/gzip/z-suffix.sh +++ b/tests/gzip/z-suffix.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/zdiff.sh b/tests/gzip/zdiff.sh index d62a84606ba..6e99b66f40b 100644 --- a/tests/gzip/zdiff.sh +++ b/tests/gzip/zdiff.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/zgrep-context.sh b/tests/gzip/zgrep-context.sh index c8648b7e4f5..d213426a3c7 100644 --- a/tests/gzip/zgrep-context.sh +++ b/tests/gzip/zgrep-context.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . 
diff --git a/tests/gzip/zgrep-f.sh b/tests/gzip/zgrep-f.sh index d0cf27f7e23..1e73ed20412 100644 --- a/tests/gzip/zgrep-f.sh +++ b/tests/gzip/zgrep-f.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/zgrep-signal.sh b/tests/gzip/zgrep-signal.sh index a8c53881add..dd8442c8164 100644 --- a/tests/gzip/zgrep-signal.sh +++ b/tests/gzip/zgrep-signal.sh @@ -15,7 +15,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/gzip/znew-k.sh b/tests/gzip/znew-k.sh index 6c239e28ea8..5cf99ed346f 100644 --- a/tests/gzip/znew-k.sh +++ b/tests/gzip/znew-k.sh @@ -14,7 +14,7 @@ # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # limit so don't run it by default. . "${srcdir=.}/init.sh"; path_prepend_ . diff --git a/tests/invalidDictionaries.c b/tests/invalidDictionaries.c index b71f7419f5c..66caa9e99cb 100644 --- a/tests/invalidDictionaries.c +++ b/tests/invalidDictionaries.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/legacy.c b/tests/legacy.c index ac4938fe5f9..3be386495b1 100644 --- a/tests/legacy.c +++ b/tests/legacy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/longmatch.c b/tests/longmatch.c index a171c0e4aae..547b2619010 100644 --- a/tests/longmatch.c +++ b/tests/longmatch.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -36,27 +36,27 @@ compress(ZSTD_CStream *ctx, ZSTD_outBuffer out, const void *data, size_t size) int main(int argc, const char** argv) { ZSTD_CStream* ctx; - ZSTD_parameters params; - size_t rc; - unsigned windowLog; + unsigned windowLog = 18; (void)argc; (void)argv; /* Create stream */ - ctx = ZSTD_createCStream(); + ctx = ZSTD_createCCtx(); if (!ctx) { return 1; } /* Set parameters */ - memset(¶ms, 0, sizeof(params)); - params.cParams.windowLog = 18; - params.cParams.chainLog = 13; - params.cParams.hashLog = 14; - params.cParams.searchLog = 1; - params.cParams.minMatch = 7; - params.cParams.targetLength = 16; - params.cParams.strategy = ZSTD_fast; - windowLog = params.cParams.windowLog; - /* Initialize stream */ - rc = ZSTD_initCStream_advanced(ctx, NULL, 0, params, 0); - if (ZSTD_isError(rc)) { return 2; } + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_windowLog, windowLog))) + return 2; + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_chainLog, 13))) + return 2; + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_hashLog, 14))) + return 2; + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_searchLog, 1))) + return 2; + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_minMatch, 7))) + return 2; + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_targetLength, 16))) + return 2; + if (ZSTD_isError(ZSTD_CCtx_setParameter(ctx, ZSTD_c_strategy, ZSTD_fast))) + return 2; { U64 compressed = 0; const U64 toCompress = ((U64)1) << 33; @@ -97,5 +97,6 @@ int main(int argc, const char** argv) free(srcBuffer); free(dstBuffer); } + ZSTD_freeCCtx(ctx); return 0; } diff --git a/tests/paramgrill.c b/tests/paramgrill.c index 033a10181a3..8971c65d627 100644 --- a/tests/paramgrill.c +++ b/tests/paramgrill.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -555,7 +555,7 @@ static int feasible(const BMK_benchResult_t results, const constraint_t target) } /* hill climbing value for part 1 */ -/* Scoring here is a linear reward for all set constraints normalized between 0 to 1 +/* Scoring here is a linear reward for all set constraints normalized between 0 and 1 * (with 0 at 0 and 1 being fully fulfilling the constraint), summed with a logarithmic * bonus to exceeding the constraint value. We also give linear ratio for compression ratio. * The constant factors are experimental. 
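
Restated as a formula, the score described in the comment above (and computed in the resultScore() hunk just below) is, with cs and ds the measured compression and decompression speeds relative to their targets, cm the memory target relative to the measured memory (so each ratio is at least 1 exactly when its constraint is met), rt = srcSize / cSize, and constants r1 = 1, rtr = 0.5, r2 = 0.1:

    score = r1  * ( MIN(1, cs) + MIN(1, ds) + MIN(1, cm) )
          + rtr * rt
          + r2  * ( MAX(0, log(cs)) + MAX(0, log(ds)) + MAX(0, log(cm)) )

The MIN() terms saturate once a constraint is satisfied, so only the logarithmic terms keep rewarding results that exceed their targets, while rt supplies the linear compression-ratio reward.
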
@@ -566,10 +566,10 @@ resultScore(const BMK_benchResult_t res, const size_t srcSize, const constraint_ double cs = 0., ds = 0., rt, cm = 0.; const double r1 = 1, r2 = 0.1, rtr = 0.5; double ret; - if(target.cSpeed) { cs = res.cSpeed / (double)target.cSpeed; } - if(target.dSpeed) { ds = res.dSpeed / (double)target.dSpeed; } - if(target.cMem != (U32)-1) { cm = (double)target.cMem / res.cMem; } - rt = ((double)srcSize / res.cSize); + if(target.cSpeed) { cs = (double)res.cSpeed / (double)target.cSpeed; } + if(target.dSpeed) { ds = (double)res.dSpeed / (double)target.dSpeed; } + if(target.cMem != (U32)-1) { cm = (double)target.cMem / (double)res.cMem; } + rt = ((double)srcSize / (double)res.cSize); ret = (MIN(1, cs) + MIN(1, ds) + MIN(1, cm))*r1 + rt * rtr + (MAX(0, log(cs))+ MAX(0, log(ds))+ MAX(0, log(cm))) * r2; @@ -581,8 +581,8 @@ resultScore(const BMK_benchResult_t res, const size_t srcSize, const constraint_ static double resultDistLvl(const BMK_benchResult_t result1, const BMK_benchResult_t lvlRes) { - double normalizedCSpeedGain1 = ((double)result1.cSpeed / lvlRes.cSpeed) - 1; - double normalizedRatioGain1 = ((double)lvlRes.cSize / result1.cSize) - 1; + double normalizedCSpeedGain1 = ((double)result1.cSpeed / (double)lvlRes.cSpeed) - 1; + double normalizedRatioGain1 = ((double)lvlRes.cSize / (double)result1.cSize) - 1; if(normalizedRatioGain1 < 0 || normalizedCSpeedGain1 < 0) { return 0.0; } @@ -854,7 +854,7 @@ BMK_displayOneResult(FILE* f, winnerInfo_t res, const size_t srcSize) } { double const ratio = res.result.cSize ? - (double)srcSize / res.result.cSize : 0; + (double)srcSize / (double)res.result.cSize : 0; double const cSpeedMBps = (double)res.result.cSpeed / MB_UNIT; double const dSpeedMBps = (double)res.result.dSpeed / MB_UNIT; @@ -937,7 +937,7 @@ BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_benchResult_t result, co } fprintf(f, "================================\n"); fprintf(f, "Level Bounds: R: > %.3f AND C: < %.1f MB/s \n\n", - (double)srcSize / g_lvltarget.cSize, (double)g_lvltarget.cSpeed / MB_UNIT); + (double)srcSize / (double)g_lvltarget.cSize, (double)g_lvltarget.cSpeed / MB_UNIT); fprintf(f, "Overall Winner: \n"); @@ -977,7 +977,7 @@ BMK_print_cLevelEntry(FILE* f, const int cLevel, } /* print comment */ { double const ratio = result.cSize ? 
- (double)srcSize / result.cSize : 0; + (double)srcSize / (double)result.cSize : 0; double const cSpeedMBps = (double)result.cSpeed / MB_UNIT; double const dSpeedMBps = (double)result.dSpeed / MB_UNIT; @@ -1726,19 +1726,19 @@ static int allBench(BMK_benchResult_t* resultPtr, /* calculate uncertainty in compression / decompression runs */ if (benchres.cSpeed) { - U64 const loopDurationC = (((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed); + double const loopDurationC = (double)(((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed); uncertaintyConstantC = ((loopDurationC + (double)(2 * g_clockGranularity))/loopDurationC); } if (benchres.dSpeed) { - U64 const loopDurationD = (((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed); + double const loopDurationD = (double)(((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed); uncertaintyConstantD = ((loopDurationD + (double)(2 * g_clockGranularity))/loopDurationD); } /* optimistic assumption of benchres */ { BMK_benchResult_t resultMax = benchres; - resultMax.cSpeed = (unsigned long long)(resultMax.cSpeed * uncertaintyConstantC * VARIANCE); - resultMax.dSpeed = (unsigned long long)(resultMax.dSpeed * uncertaintyConstantD * VARIANCE); + resultMax.cSpeed = (unsigned long long)((double)resultMax.cSpeed * uncertaintyConstantC * VARIANCE); + resultMax.dSpeed = (unsigned long long)((double)resultMax.dSpeed * uncertaintyConstantD * VARIANCE); /* disregard infeasible results in feas mode */ /* disregard if resultMax < winner in infeas mode */ @@ -1850,8 +1850,8 @@ static int BMK_seed(winnerInfo_t* winners, if ((double)testResult.cSize <= ((double)winners[cLevel].result.cSize * (1. + (0.02 / cLevel))) ) { /* Validate solution is "good enough" */ - double W_ratio = (double)buf.srcSize / testResult.cSize; - double O_ratio = (double)buf.srcSize / winners[cLevel].result.cSize; + double W_ratio = (double)buf.srcSize / (double)testResult.cSize; + double O_ratio = (double)buf.srcSize / (double)winners[cLevel].result.cSize; double W_ratioNote = log (W_ratio); double O_ratioNote = log (O_ratio); size_t W_DMemUsed = (1 << params.vals[wlog_ind]) + (16 KB); @@ -1864,11 +1864,11 @@ static int BMK_seed(winnerInfo_t* winners, double W_CMemUsed_note = W_ratioNote * ( 50 + 13*cLevel) - log((double)W_CMemUsed); double O_CMemUsed_note = O_ratioNote * ( 50 + 13*cLevel) - log((double)O_CMemUsed); - double W_CSpeed_note = W_ratioNote * (double)( 30 + 10*cLevel) + log(testResult.cSpeed); - double O_CSpeed_note = O_ratioNote * (double)( 30 + 10*cLevel) + log(winners[cLevel].result.cSpeed); + double W_CSpeed_note = W_ratioNote * (double)( 30 + 10*cLevel) + log((double)testResult.cSpeed); + double O_CSpeed_note = O_ratioNote * (double)( 30 + 10*cLevel) + log((double)winners[cLevel].result.cSpeed); - double W_DSpeed_note = W_ratioNote * (double)( 20 + 2*cLevel) + log(testResult.dSpeed); - double O_DSpeed_note = O_ratioNote * (double)( 20 + 2*cLevel) + log(winners[cLevel].result.dSpeed); + double W_DSpeed_note = W_ratioNote * (double)( 20 + 2*cLevel) + log((double)testResult.dSpeed); + double O_DSpeed_note = O_ratioNote * (double)( 20 + 2*cLevel) + log((double)winners[cLevel].result.dSpeed); if (W_DMemUsed_note < O_DMemUsed_note) { /* uses too much Decompression memory for too little benefit */ diff --git a/tests/playTests.sh b/tests/playTests.sh index b7a3d88a817..5f595f61154 100755 --- a/tests/playTests.sh +++ b/tests/playTests.sh @@ -2,6 +2,10 @@ set -e +unset ZSTD_CLEVEL +unset ZSTD_NBTHREADS + + die() { println "$@" 1>&2 exit 1 @@ -88,7 +92,6 @@ 
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd) PRGDIR="$SCRIPT_DIR/../programs" TESTDIR="$SCRIPT_DIR/../tests" UNAME=$(uname) -ZSTDGREP="$PRGDIR/zstdgrep" detectedTerminal=false if [ -t 0 ] && [ -t 1 ] @@ -182,6 +185,7 @@ else fi +zstd -vvV println "\n===> simple tests " @@ -208,6 +212,7 @@ zstd -c --fast=0 tmp > $INTOVOID && die "--fast must not accept value 0" println "test : too large numeric argument" zstd --fast=9999999999 -f tmp && die "should have refused numeric value" println "test : set compression level with environment variable ZSTD_CLEVEL" + ZSTD_CLEVEL=12 zstd -f tmp # positive compression level ZSTD_CLEVEL=-12 zstd -f tmp # negative compression level ZSTD_CLEVEL=+12 zstd -f tmp # valid: verbose '+' sign @@ -219,6 +224,11 @@ ZSTD_CLEVEL=3a7 zstd -f tmp # malformed env var, warn and revert to default sett ZSTD_CLEVEL=50000000000 zstd -f tmp # numeric value too large, warn and revert to default setting println "test : override ZSTD_CLEVEL with command line option" ZSTD_CLEVEL=12 zstd --fast=3 -f tmp # overridden by command line option + +# temporary envvar changes in the above tests would actually persist in macos /bin/sh +unset ZSTD_CLEVEL + + println "test : compress to stdout" zstd tmp -c > tmpCompressed zstd tmp --stdout > tmpCompressed # long command format @@ -258,10 +268,13 @@ zstd -dc - < tmp.zst > $INTOVOID zstd -d < tmp.zst > $INTOVOID # implicit stdout when stdin is used zstd -d - < tmp.zst > $INTOVOID println "test : impose memory limitation (must fail)" -zstd -d -f tmp.zst -M2K -c > $INTOVOID && die "decompression needs more memory than allowed" -zstd -d -f tmp.zst --memlimit=2K -c > $INTOVOID && die "decompression needs more memory than allowed" # long command -zstd -d -f tmp.zst --memory=2K -c > $INTOVOID && die "decompression needs more memory than allowed" # long command -zstd -d -f tmp.zst --memlimit-decompress=2K -c > $INTOVOID && die "decompression needs more memory than allowed" # long command +datagen -g500K > tmplimit +zstd -f tmplimit +zstd -d -f tmplimit.zst -M2K -c > $INTOVOID && die "decompression needs more memory than allowed" +zstd -d -f tmplimit.zst --memlimit=2K -c > $INTOVOID && die "decompression needs more memory than allowed" # long command +zstd -d -f tmplimit.zst --memory=2K -c > $INTOVOID && die "decompression needs more memory than allowed" # long command +zstd -d -f tmplimit.zst --memlimit-decompress=2K -c > $INTOVOID && die "decompression needs more memory than allowed" # long command +rm -f tmplimit tmplimit.zst println "test : overwrite protection" zstd -q tmp && die "overwrite check failed!" println "test : force overwrite" @@ -304,24 +317,13 @@ zstd -d -f tmp_corrupt.zst --no-check zstd -d -f tmp_corrupt.zst --check --no-check # final flag overrides zstd -d -f tmp.zst --no-check -if [ "$isWindows" = false ]; then +if [ "$isWindows" = false ] && [ "$UNAME" != "AIX" ]; then if [ -n "$(which readelf)" ]; then println "test: check if binary has executable stack (#2963)" readelf -lW "$ZSTD_BIN" | grep 'GNU_STACK .* RW ' || die "zstd binary has executable stack!" 
fi fi -println "\n===> zstdgrep tests" -ln -sf "$ZSTD_BIN" zstdcat -rm -f tmp_grep -echo "1234" > tmp_grep -zstd -f tmp_grep -lines=$(ZCAT=./zstdcat "$ZSTDGREP" 2>&1 "1234" tmp_grep tmp_grep.zst | wc -l) -test 2 -eq $lines -ZCAT=./zstdcat "$ZSTDGREP" 2>&1 "1234" tmp_grep_bad.zst && die "Should have failed" -ZCAT=./zstdcat "$ZSTDGREP" 2>&1 "1234" tmp_grep_bad.zst | grep "No such file or directory" || true -rm -f tmp_grep* - println "\n===> --exclude-compressed flag" rm -rf precompressedFilterTestDir mkdir -p precompressedFilterTestDir @@ -375,12 +377,39 @@ println "\n===> recursive mode test " # combination of -r with empty list of input file zstd -c -r < tmp > tmp.zst +# combination of -r with empty folder +mkdir -p tmpEmptyDir +zstd -r tmpEmptyDir +rm -rf tmpEmptyDir + println "\n===> file removal" zstd -f --rm tmp test ! -f tmp # tmp should no longer be present zstd -f -d --rm tmp.zst -test ! -f tmp.zst # tmp.zst should no longer be present +test ! -f tmp.zst # tmp.zst should no longer be present +println "test: --rm is disabled when output is stdout" +test -f tmp +zstd --rm tmp -c > $INTOVOID +test -f tmp # tmp shall still be there +zstd -f --rm tmp -c > $INTOVOID +test -f tmp # tmp shall still be there +zstd -f tmp -c > $INTOVOID --rm +test -f tmp # tmp shall still be there +println "test: --rm is disabled when multiple inputs are concatenated into a single output" +cp tmp tmp2 +zstd --rm tmp tmp2 -c > $INTOVOID +test -f tmp +test -f tmp2 +rm -f tmp3.zst +echo 'y' | zstd -v tmp tmp2 -o tmp3.zst --rm # prompt for confirmation +test -f tmp +test -f tmp2 +zstd -f tmp tmp2 -o tmp3.zst --rm # just warns, no prompt +test -f tmp +test -f tmp2 +zstd -q tmp tmp2 -o tmp3.zst --rm && die "should refuse to concatenate" + println "test : should quietly not remove non-regular file" println hello > tmp zstd tmp -f -o "$DEVDEVICE" 2>tmplog > "$INTOVOID" @@ -411,44 +440,46 @@ println "\n===> decompression only tests " dd bs=1048576 count=1 if=/dev/zero of=tmp zstd -d -o tmp1 "$TESTDIR/golden-decompression/rle-first-block.zst" $DIFF -s tmp1 tmp -rm -f tmp* +touch tmp_empty +zstd -d -o tmp2 "$TESTDIR/golden-decompression/empty-block.zst" +$DIFF -s tmp2 tmp_empty +rm -f tmp* println "\n===> compress multiple files" println hello > tmp1 println world > tmp2 zstd tmp1 tmp2 -o "$INTOVOID" -f zstd tmp1 tmp2 -c | zstd -t -zstd tmp1 tmp2 -o tmp.zst +echo 'y' | zstd -v tmp1 tmp2 -o tmp.zst test ! -f tmp1.zst test ! -f tmp2.zst zstd tmp1 tmp2 zstd -t tmp1.zst tmp2.zst zstd -dc tmp1.zst tmp2.zst zstd tmp1.zst tmp2.zst -o "$INTOVOID" -f -zstd -d tmp1.zst tmp2.zst -o tmp +echo 'y' | zstd -v -d tmp1.zst tmp2.zst -o tmp touch tmpexists zstd tmp1 tmp2 -f -o tmpexists zstd tmp1 tmp2 -q -o tmpexists && die "should have refused to overwrite" println gooder > tmp_rm1 println boi > tmp_rm2 println worldly > tmp_rm3 -echo 'y' | zstd tmp_rm1 tmp_rm2 -v -o tmp_rm3.zst --rm # tests the warning prompt for --rm with multiple inputs into once source -test ! -f tmp_rm1 -test ! -f tmp_rm2 +echo 'y' | zstd -v tmp_rm1 tmp_rm2 -v -o tmp_rm3.zst +test -f tmp_rm1 +test -f tmp_rm2 cp tmp_rm3.zst tmp_rm4.zst -echo 'Y' | zstd -d tmp_rm3.zst tmp_rm4.zst -v -o tmp_rm_out --rm -test ! -f tmp_rm3.zst -test ! 
-f tmp_rm4.zst -echo 'yes' | zstd tmp_rm_out tmp_rm3 -c --rm && die "compressing multiple files to stdout with --rm should fail unless -f is specified" -echo 'yes' | zstd tmp_rm_out tmp_rm3 -c --rm -v && die "compressing multiple files to stdout with --rm should fail unless -f is specified" +echo 'Y' | zstd -v -d tmp_rm3.zst tmp_rm4.zst -v -o tmp_rm_out --rm +test -f tmp_rm3.zst +test -f tmp_rm4.zst println gooder > tmpexists1 zstd tmpexists1 tmpexists -c --rm -f > $INTOVOID - # Bug: PR #972 if [ "$?" -eq 139 ]; then die "should not have segfaulted" fi +test -f tmpexists1 +test -f tmpexists println "\n===> multiple files and shell completion " datagen -s1 > tmp1 2> $INTOVOID datagen -s2 -g100K > tmp2 2> $INTOVOID @@ -552,19 +583,9 @@ if [ "$isWindows" = false ] ; then zstd -f -d tmp1.zst -o tmp1.out assertFilePermissions tmp1.out 400 - rm -f tmp1.zst tmp1.out - umask 0666 chmod 0666 tmp1 tmp2 - println "test : respect umask when copying permissions in file -> file compression " - zstd -f tmp1 -o tmp1.zst - assertFilePermissions tmp1.zst 0 - println "test : respect umask when copying permissions in file -> file decompression " - chmod 0666 tmp1.zst - zstd -f -d tmp1.zst -o tmp1.out - assertFilePermissions tmp1.out 0 - rm -f tmp1.zst tmp1.out println "test : respect umask when compressing from stdin input " @@ -728,11 +749,11 @@ test -f tmp4 println "test : survive the list of files with too long filenames (--filelist=FILE)" datagen -g5M > tmp_badList -zstd -f --filelist=tmp_badList && die "should have failed : file name length is too long" +zstd -qq -f --filelist=tmp_badList && die "should have failed : file name length is too long" # printing very long text garbage on console will cause CI failure println "test : survive a list of files which is text garbage (--filelist=FILE)" datagen > tmp_badList -zstd -f --filelist=tmp_badList && die "should have failed : list is text garbage" +zstd -qq -f --filelist=tmp_badList && die "should have failed : list is text garbage" # printing very long text garbage on console will cause CI failure println "test : survive a list of files which is binary garbage (--filelist=FILE)" datagen -P0 -g1M > tmp_badList @@ -798,6 +819,16 @@ println "Hello world 1!" | zstd -df println "Hello world 2!" | zstd -dcf println "Hello world 3!" > tmp1 zstd -dcf tmp1 +println "" | zstd -df > tmp1 +println "" > tmp2 +$DIFF -q tmp1 tmp2 +println "1" | zstd -df > tmp1 +println "1" > tmp2 +$DIFF -q tmp1 tmp2 +println "12" | zstd -df > tmp1 +println "12" > tmp2 +$DIFF -q tmp1 tmp2 +rm -rf tmp* println "\n===> frame concatenation " @@ -1156,6 +1187,10 @@ zstd -t tmp3 && die "bad file not detected !" # detects 0-sized files as bad println "test --rm and --test combined " zstd -t --rm tmp1.zst test -f tmp1.zst # check file is still present +cp tmp1.zst tmp2.zst +zstd -t tmp1.zst tmp2.zst --rm +test -f tmp1.zst # check file is still present +test -f tmp2.zst # check file is still present split -b16384 tmp1.zst tmpSplit. zstd -t tmpSplit.* && die "bad file not detected !" datagen | zstd -c | zstd -t @@ -1182,7 +1217,15 @@ zstd -rqi0b1e2 tmp1 println "benchmark decompression only" zstd -f tmp1 zstd -b -d -i0 tmp1.zst +println "benchmark can fail - decompression on invalid data" +zstd -b -d -i0 tmp1 && die "invalid .zst data => benchmark should have failed" +GZIPMODE=1 +zstd --format=gzip -V || GZIPMODE=0 +if [ $GZIPMODE -eq 1 ]; then + println "benchmark mode is only compatible with zstd" + zstd --format=gzip -b tmp1 && die "-b should be incompatible with gzip format!" 
+fi println "\n===> zstd compatibility tests " @@ -1441,6 +1484,8 @@ then ZSTD_NBTHREADS=50000000000 zstd -f mt_tmp # numeric value too large, warn and revert to default setting= ZSTD_NBTHREADS=2 zstd -f mt_tmp # correct usage ZSTD_NBTHREADS=1 zstd -f mt_tmp # correct usage: single thread + # temporary envvar changes in the above tests would actually persist in macos /bin/sh + unset ZSTD_NBTHREADS rm -f mt_tmp* println "\n===> ovLog tests " @@ -1575,6 +1620,44 @@ elif [ "$longCSize19wlog23" -gt "$optCSize19wlog23" ]; then exit 1 fi +println "\n===> zstd asyncio tests " + +addFrame() { + datagen -g2M -s$2 >> tmp_uncompressed + datagen -g2M -s$2 | zstd -1 --format=$1 >> tmp_compressed.zst +} + +addTwoFrames() { + addFrame $1 1 + addFrame $1 2 +} + +testAsyncIO() { + roundTripTest -g2M "3 --asyncio --format=$1" + roundTripTest -g2M "3 --no-asyncio --format=$1" +} + +rm -f tmp_compressed tmp_uncompressed +testAsyncIO zstd +addTwoFrames zstd +if [ $GZIPMODE -eq 1 ]; then + testAsyncIO gzip + addTwoFrames gzip +fi +if [ $LZMAMODE -eq 1 ]; then + testAsyncIO lzma + addTwoFrames lzma +fi +if [ $LZ4MODE -eq 1 ]; then + testAsyncIO lz4 + addTwoFrames lz4 +fi +cat tmp_uncompressed | $MD5SUM > tmp2 +zstd -d tmp_compressed.zst --asyncio -c | $MD5SUM > tmp1 +$DIFF -q tmp1 tmp2 +rm tmp1 +zstd -d tmp_compressed.zst --no-asyncio -c | $MD5SUM > tmp1 +$DIFF -q tmp1 tmp2 if [ "$1" != "--test-large-data" ]; then println "Skipping large data tests" diff --git a/tests/poolTests.c b/tests/poolTests.c index 08f31c0691f..9e62722bfa0 100644 --- a/tests/poolTests.c +++ b/tests/poolTests.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/rateLimiter.py b/tests/rateLimiter.py index 26293728e68..3e65081e059 100755 --- a/tests/rateLimiter.py +++ b/tests/rateLimiter.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/.gitignore b/tests/regression/.gitignore index 1b2618f415d..3da209d40e3 100644 --- a/tests/regression/.gitignore +++ b/tests/regression/.gitignore @@ -1,3 +1,4 @@ # regression test artifacts data-cache +cache test diff --git a/tests/regression/Makefile b/tests/regression/Makefile index a440c6c9425..ba8b4302419 100644 --- a/tests/regression/Makefile +++ b/tests/regression/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/config.c b/tests/regression/config.c index 57cd110c6bb..1d86fff232f 100644 --- a/tests/regression/config.c +++ b/tests/regression/config.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -183,7 +183,7 @@ static config_t no_pledged_src_size_with_dict = { }; static param_value_t const ldm_param_values[] = { - {.param = ZSTD_c_enableLongDistanceMatching, .value = 1}, + {.param = ZSTD_c_enableLongDistanceMatching, .value = ZSTD_ps_enable}, }; static config_t ldm = { @@ -204,7 +204,7 @@ static config_t mt = { static param_value_t const mt_ldm_param_values[] = { {.param = ZSTD_c_nbWorkers, .value = 2}, - {.param = ZSTD_c_enableLongDistanceMatching, .value = 1}, + {.param = ZSTD_c_enableLongDistanceMatching, .value = ZSTD_ps_enable}, }; static config_t mt_ldm = { diff --git a/tests/regression/config.h b/tests/regression/config.h index dd889374375..a4b542a90af 100644 --- a/tests/regression/config.h +++ b/tests/regression/config.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/data.c b/tests/regression/data.c index 341b02d3b10..43f085f2cc7 100644 --- a/tests/regression/data.c +++ b/tests/regression/data.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/data.h b/tests/regression/data.h index e54e6a1dc9d..a4ee920723e 100644 --- a/tests/regression/data.h +++ b/tests/regression/data.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/levels.h b/tests/regression/levels.h index e98209d8015..d15b12046bb 100644 --- a/tests/regression/levels.h +++ b/tests/regression/levels.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/method.c b/tests/regression/method.c index 55b1154ebca..f84a15ef330 100644 --- a/tests/regression/method.c +++ b/tests/regression/method.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/method.h b/tests/regression/method.h index 1a36a9399c2..8efdd33a0b2 100644 --- a/tests/regression/method.h +++ b/tests/regression/method.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/result.c b/tests/regression/result.c index 1f879c14028..8ccb8751e67 100644 --- a/tests/regression/result.c +++ b/tests/regression/result.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/result.h b/tests/regression/result.h index 197fa905dcd..8a761ea4da8 100644 --- a/tests/regression/result.h +++ b/tests/regression/result.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. 
+ * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/regression/results.csv b/tests/regression/results.csv index f43129e6c2e..188eea90081 100644 --- a/tests/regression/results.csv +++ b/tests/regression/results.csv @@ -1,9 +1,9 @@ Data, Config, Method, Total compressed size -silesia.tar, level -5, compress simple, 6853608 -silesia.tar, level -3, compress simple, 6505969 -silesia.tar, level -1, compress simple, 6179026 +silesia.tar, level -5, compress simple, 6861055 +silesia.tar, level -3, compress simple, 6505483 +silesia.tar, level -1, compress simple, 6179047 silesia.tar, level 0, compress simple, 4854086 -silesia.tar, level 1, compress simple, 5327373 +silesia.tar, level 1, compress simple, 5327717 silesia.tar, level 3, compress simple, 4854086 silesia.tar, level 4, compress simple, 4791503 silesia.tar, level 5, compress simple, 4677740 @@ -11,12 +11,12 @@ silesia.tar, level 6, compress silesia.tar, level 7, compress simple, 4576661 silesia.tar, level 9, compress simple, 4552899 silesia.tar, level 13, compress simple, 4502956 -silesia.tar, level 16, compress simple, 4360527 -silesia.tar, level 19, compress simple, 4267266 +silesia.tar, level 16, compress simple, 4360546 +silesia.tar, level 19, compress simple, 4265911 silesia.tar, uncompressed literals, compress simple, 4854086 -silesia.tar, uncompressed literals optimal, compress simple, 4267266 -silesia.tar, huffman literals, compress simple, 6179026 -github.tar, level -5, compress simple, 52110 +silesia.tar, uncompressed literals optimal, compress simple, 4265911 +silesia.tar, huffman literals, compress simple, 6179047 +github.tar, level -5, compress simple, 52115 github.tar, level -3, compress simple, 45678 github.tar, level -1, compress simple, 42560 github.tar, level 0, compress simple, 38831 @@ -28,16 +28,16 @@ github.tar, level 6, compress github.tar, level 7, compress simple, 38110 github.tar, level 9, compress simple, 36760 github.tar, level 13, compress simple, 35501 -github.tar, level 16, compress simple, 40471 -github.tar, level 19, compress simple, 32134 +github.tar, level 16, compress simple, 40466 +github.tar, level 19, compress simple, 32276 github.tar, uncompressed literals, compress simple, 38831 -github.tar, uncompressed literals optimal, compress simple, 32134 +github.tar, uncompressed literals optimal, compress simple, 32276 github.tar, huffman literals, compress simple, 42560 -silesia, level -5, compress cctx, 6852424 -silesia, level -3, compress cctx, 6503413 -silesia, level -1, compress cctx, 6172178 +silesia, level -5, compress cctx, 6857372 +silesia, level -3, compress cctx, 6503412 +silesia, level -1, compress cctx, 6172202 silesia, level 0, compress cctx, 4842075 -silesia, level 1, compress cctx, 5306426 +silesia, level 1, compress cctx, 5306632 silesia, level 3, compress cctx, 4842075 silesia, level 4, compress cctx, 4779186 silesia, level 5, compress cctx, 4666323 @@ -45,8 +45,8 @@ silesia, level 6, compress silesia, level 7, compress cctx, 4566984 silesia, level 9, compress cctx, 4543018 silesia, level 13, compress cctx, 4493990 -silesia, level 16, compress cctx, 4359864 -silesia, level 19, compress cctx, 4296880 +silesia, level 16, compress cctx, 4360041 +silesia, level 19, compress cctx, 4296055 silesia, long distance mode, compress cctx, 4842075 silesia, multithreaded, compress cctx, 4842075 silesia, multithreaded long distance mode, compress cctx, 4842075 @@ -55,19 +55,19 @@ 
silesia, small hash log, compress silesia, small chain log, compress cctx, 4912197 silesia, explicit params, compress cctx, 4794052 silesia, uncompressed literals, compress cctx, 4842075 -silesia, uncompressed literals optimal, compress cctx, 4296880 -silesia, huffman literals, compress cctx, 6172178 +silesia, uncompressed literals optimal, compress cctx, 4296055 +silesia, huffman literals, compress cctx, 6172202 silesia, multithreaded with advanced params, compress cctx, 4842075 -github, level -5, compress cctx, 204411 -github, level -5 with dict, compress cctx, 47294 +github, level -5, compress cctx, 204407 +github, level -5 with dict, compress cctx, 52059 github, level -3, compress cctx, 193253 -github, level -3 with dict, compress cctx, 48047 +github, level -3 with dict, compress cctx, 46787 github, level -1, compress cctx, 175468 -github, level -1 with dict, compress cctx, 43527 +github, level -1 with dict, compress cctx, 43585 github, level 0, compress cctx, 136332 github, level 0 with dict, compress cctx, 41534 github, level 1, compress cctx, 142365 -github, level 1 with dict, compress cctx, 42157 +github, level 1 with dict, compress cctx, 42259 github, level 3, compress cctx, 136332 github, level 3 with dict, compress cctx, 41534 github, level 4, compress cctx, 136199 @@ -80,11 +80,11 @@ github, level 7, compress github, level 7 with dict, compress cctx, 38755 github, level 9, compress cctx, 135122 github, level 9 with dict, compress cctx, 39398 -github, level 13, compress cctx, 134064 +github, level 13, compress cctx, 132878 github, level 13 with dict, compress cctx, 39948 -github, level 16, compress cctx, 134064 +github, level 16, compress cctx, 133209 github, level 16 with dict, compress cctx, 37568 -github, level 19, compress cctx, 134064 +github, level 19, compress cctx, 132879 github, level 19 with dict, compress cctx, 37567 github, long distance mode, compress cctx, 141069 github, multithreaded, compress cctx, 141069 @@ -94,14 +94,14 @@ github, small hash log, compress github, small chain log, compress cctx, 139242 github, explicit params, compress cctx, 140932 github, uncompressed literals, compress cctx, 136332 -github, uncompressed literals optimal, compress cctx, 134064 +github, uncompressed literals optimal, compress cctx, 132879 github, huffman literals, compress cctx, 175468 github, multithreaded with advanced params, compress cctx, 141069 -silesia, level -5, zstdcli, 6852472 -silesia, level -3, zstdcli, 6503461 -silesia, level -1, zstdcli, 6172226 +silesia, level -5, zstdcli, 6857420 +silesia, level -3, zstdcli, 6503460 +silesia, level -1, zstdcli, 6172250 silesia, level 0, zstdcli, 4842123 -silesia, level 1, zstdcli, 5306474 +silesia, level 1, zstdcli, 5306680 silesia, level 3, zstdcli, 4842123 silesia, level 4, zstdcli, 4779234 silesia, level 5, zstdcli, 4666371 @@ -109,8 +109,8 @@ silesia, level 6, zstdcli, silesia, level 7, zstdcli, 4567032 silesia, level 9, zstdcli, 4543066 silesia, level 13, zstdcli, 4494038 -silesia, level 16, zstdcli, 4359912 -silesia, level 19, zstdcli, 4296928 +silesia, level 16, zstdcli, 4360089 +silesia, level 19, zstdcli, 4296103 silesia, long distance mode, zstdcli, 4833785 silesia, multithreaded, zstdcli, 4842123 silesia, multithreaded long distance mode, zstdcli, 4833785 @@ -120,13 +120,13 @@ silesia, small chain log, zstdcli, silesia, explicit params, zstdcli, 4795432 silesia, uncompressed literals, zstdcli, 5120614 silesia, uncompressed literals optimal, zstdcli, 4319566 -silesia, huffman literals, zstdcli, 5321394 +silesia, huffman 
literals, zstdcli, 5321417 silesia, multithreaded with advanced params, zstdcli, 5120614 -silesia.tar, level -5, zstdcli, 6853994 -silesia.tar, level -3, zstdcli, 6506742 -silesia.tar, level -1, zstdcli, 6179765 +silesia.tar, level -5, zstdcli, 6862049 +silesia.tar, level -3, zstdcli, 6506509 +silesia.tar, level -1, zstdcli, 6179789 silesia.tar, level 0, zstdcli, 4854164 -silesia.tar, level 1, zstdcli, 5328534 +silesia.tar, level 1, zstdcli, 5329010 silesia.tar, level 3, zstdcli, 4854164 silesia.tar, level 4, zstdcli, 4792352 silesia.tar, level 5, zstdcli, 4678682 @@ -134,21 +134,21 @@ silesia.tar, level 6, zstdcli, silesia.tar, level 7, zstdcli, 4578719 silesia.tar, level 9, zstdcli, 4552903 silesia.tar, level 13, zstdcli, 4502960 -silesia.tar, level 16, zstdcli, 4360531 -silesia.tar, level 19, zstdcli, 4267270 +silesia.tar, level 16, zstdcli, 4360550 +silesia.tar, level 19, zstdcli, 4265915 silesia.tar, no source size, zstdcli, 4854160 silesia.tar, long distance mode, zstdcli, 4845745 silesia.tar, multithreaded, zstdcli, 4854164 silesia.tar, multithreaded long distance mode, zstdcli, 4845745 silesia.tar, small window log, zstdcli, 7100701 -silesia.tar, small hash log, zstdcli, 6529289 +silesia.tar, small hash log, zstdcli, 6529264 silesia.tar, small chain log, zstdcli, 4917022 silesia.tar, explicit params, zstdcli, 4820713 silesia.tar, uncompressed literals, zstdcli, 5122571 silesia.tar, uncompressed literals optimal, zstdcli, 4310145 -silesia.tar, huffman literals, zstdcli, 5342054 +silesia.tar, huffman literals, zstdcli, 5342074 silesia.tar, multithreaded with advanced params, zstdcli, 5122571 -github, level -5, zstdcli, 206411 +github, level -5, zstdcli, 206407 github, level -5 with dict, zstdcli, 48718 github, level -3, zstdcli, 195253 github, level -3 with dict, zstdcli, 47395 @@ -170,11 +170,11 @@ github, level 7, zstdcli, github, level 7 with dict, zstdcli, 40745 github, level 9, zstdcli, 137122 github, level 9 with dict, zstdcli, 41393 -github, level 13, zstdcli, 136064 +github, level 13, zstdcli, 134878 github, level 13 with dict, zstdcli, 41900 -github, level 16, zstdcli, 136064 +github, level 16, zstdcli, 135209 github, level 16 with dict, zstdcli, 39577 -github, level 19, zstdcli, 136064 +github, level 19, zstdcli, 134879 github, level 19 with dict, zstdcli, 39576 github, long distance mode, zstdcli, 138332 github, multithreaded, zstdcli, 138332 @@ -184,19 +184,19 @@ github, small hash log, zstdcli, github, small chain log, zstdcli, 138341 github, explicit params, zstdcli, 136197 github, uncompressed literals, zstdcli, 167911 -github, uncompressed literals optimal, zstdcli, 159227 +github, uncompressed literals optimal, zstdcli, 154667 github, huffman literals, zstdcli, 144365 github, multithreaded with advanced params, zstdcli, 167911 -github.tar, level -5, zstdcli, 52114 -github.tar, level -5 with dict, zstdcli, 46502 +github.tar, level -5, zstdcli, 52119 +github.tar, level -5 with dict, zstdcli, 50978 github.tar, level -3, zstdcli, 45682 -github.tar, level -3 with dict, zstdcli, 42181 +github.tar, level -3 with dict, zstdcli, 44660 github.tar, level -1, zstdcli, 42564 -github.tar, level -1 with dict, zstdcli, 41140 +github.tar, level -1 with dict, zstdcli, 41155 github.tar, level 0, zstdcli, 38835 github.tar, level 0 with dict, zstdcli, 37999 github.tar, level 1, zstdcli, 39204 -github.tar, level 1 with dict, zstdcli, 38288 +github.tar, level 1 with dict, zstdcli, 38093 github.tar, level 3, zstdcli, 38835 github.tar, level 3 with dict, zstdcli, 37999 github.tar, level 4, 
zstdcli, 38897 @@ -211,10 +211,10 @@ github.tar, level 9, zstdcli, github.tar, level 9 with dict, zstdcli, 36632 github.tar, level 13, zstdcli, 35505 github.tar, level 13 with dict, zstdcli, 37134 -github.tar, level 16, zstdcli, 40475 -github.tar, level 16 with dict, zstdcli, 33382 -github.tar, level 19, zstdcli, 32138 -github.tar, level 19 with dict, zstdcli, 32713 +github.tar, level 16, zstdcli, 40470 +github.tar, level 16 with dict, zstdcli, 33378 +github.tar, level 19, zstdcli, 32280 +github.tar, level 19 with dict, zstdcli, 32716 github.tar, no source size, zstdcli, 38832 github.tar, no source size with dict, zstdcli, 38004 github.tar, long distance mode, zstdcli, 40236 @@ -228,11 +228,11 @@ github.tar, uncompressed literals, zstdcli, github.tar, uncompressed literals optimal, zstdcli, 35401 github.tar, huffman literals, zstdcli, 38857 github.tar, multithreaded with advanced params, zstdcli, 41529 -silesia, level -5, advanced one pass, 6852424 -silesia, level -3, advanced one pass, 6503413 -silesia, level -1, advanced one pass, 6172178 +silesia, level -5, advanced one pass, 6857372 +silesia, level -3, advanced one pass, 6503412 +silesia, level -1, advanced one pass, 6172202 silesia, level 0, advanced one pass, 4842075 -silesia, level 1, advanced one pass, 5306426 +silesia, level 1, advanced one pass, 5306632 silesia, level 3, advanced one pass, 4842075 silesia, level 4, advanced one pass, 4779186 silesia, level 5 row 1, advanced one pass, 4666323 @@ -248,8 +248,8 @@ silesia, level 11 row 2, advanced silesia, level 12 row 1, advanced one pass, 4505046 silesia, level 12 row 2, advanced one pass, 4503116 silesia, level 13, advanced one pass, 4493990 -silesia, level 16, advanced one pass, 4359864 -silesia, level 19, advanced one pass, 4296880 +silesia, level 16, advanced one pass, 4360041 +silesia, level 19, advanced one pass, 4296055 silesia, no source size, advanced one pass, 4842075 silesia, long distance mode, advanced one pass, 4833710 silesia, multithreaded, advanced one pass, 4842075 @@ -260,13 +260,13 @@ silesia, small chain log, advanced silesia, explicit params, advanced one pass, 4795432 silesia, uncompressed literals, advanced one pass, 5120566 silesia, uncompressed literals optimal, advanced one pass, 4319518 -silesia, huffman literals, advanced one pass, 5321346 +silesia, huffman literals, advanced one pass, 5321369 silesia, multithreaded with advanced params, advanced one pass, 5120566 -silesia.tar, level -5, advanced one pass, 6853608 -silesia.tar, level -3, advanced one pass, 6505969 -silesia.tar, level -1, advanced one pass, 6179026 +silesia.tar, level -5, advanced one pass, 6861055 +silesia.tar, level -3, advanced one pass, 6505483 +silesia.tar, level -1, advanced one pass, 6179047 silesia.tar, level 0, advanced one pass, 4854086 -silesia.tar, level 1, advanced one pass, 5327373 +silesia.tar, level 1, advanced one pass, 5327717 silesia.tar, level 3, advanced one pass, 4854086 silesia.tar, level 4, advanced one pass, 4791503 silesia.tar, level 5 row 1, advanced one pass, 4677740 @@ -282,21 +282,21 @@ silesia.tar, level 11 row 2, advanced silesia.tar, level 12 row 1, advanced one pass, 4514049 silesia.tar, level 12 row 2, advanced one pass, 4513797 silesia.tar, level 13, advanced one pass, 4502956 -silesia.tar, level 16, advanced one pass, 4360527 -silesia.tar, level 19, advanced one pass, 4267266 +silesia.tar, level 16, advanced one pass, 4360546 +silesia.tar, level 19, advanced one pass, 4265911 silesia.tar, no source size, advanced one pass, 4854086 silesia.tar, long 
distance mode, advanced one pass, 4840452 silesia.tar, multithreaded, advanced one pass, 4854160 silesia.tar, multithreaded long distance mode, advanced one pass, 4845741 silesia.tar, small window log, advanced one pass, 7100655 -silesia.tar, small hash log, advanced one pass, 6529231 +silesia.tar, small hash log, advanced one pass, 6529206 silesia.tar, small chain log, advanced one pass, 4917041 silesia.tar, explicit params, advanced one pass, 4806855 silesia.tar, uncompressed literals, advanced one pass, 5122473 silesia.tar, uncompressed literals optimal, advanced one pass, 4310141 -silesia.tar, huffman literals, advanced one pass, 5341685 +silesia.tar, huffman literals, advanced one pass, 5341705 silesia.tar, multithreaded with advanced params, advanced one pass, 5122567 -github, level -5, advanced one pass, 204411 +github, level -5, advanced one pass, 204407 github, level -5 with dict, advanced one pass, 46718 github, level -3, advanced one pass, 193253 github, level -3 with dict, advanced one pass, 45395 @@ -312,8 +312,8 @@ github, level 1, advanced github, level 1 with dict, advanced one pass, 41682 github, level 1 with dict dms, advanced one pass, 41682 github, level 1 with dict dds, advanced one pass, 41682 -github, level 1 with dict copy, advanced one pass, 41674 -github, level 1 with dict load, advanced one pass, 43755 +github, level 1 with dict copy, advanced one pass, 41698 +github, level 1 with dict load, advanced one pass, 43814 github, level 3, advanced one pass, 136332 github, level 3 with dict, advanced one pass, 41148 github, level 3 with dict dms, advanced one pass, 41148 @@ -370,39 +370,39 @@ github, level 9 with dict dms, advanced github, level 9 with dict dds, advanced one pass, 39393 github, level 9 with dict copy, advanced one pass, 39398 github, level 9 with dict load, advanced one pass, 41710 -github, level 11 row 1, advanced one pass, 135119 +github, level 11 row 1, advanced one pass, 135367 github, level 11 row 1 with dict dms, advanced one pass, 39671 github, level 11 row 1 with dict dds, advanced one pass, 39671 github, level 11 row 1 with dict copy, advanced one pass, 39651 github, level 11 row 1 with dict load, advanced one pass, 41360 -github, level 11 row 2, advanced one pass, 135119 +github, level 11 row 2, advanced one pass, 135367 github, level 11 row 2 with dict dms, advanced one pass, 39671 github, level 11 row 2 with dict dds, advanced one pass, 39671 github, level 11 row 2 with dict copy, advanced one pass, 39651 github, level 11 row 2 with dict load, advanced one pass, 41360 -github, level 12 row 1, advanced one pass, 134180 +github, level 12 row 1, advanced one pass, 134402 github, level 12 row 1 with dict dms, advanced one pass, 39677 github, level 12 row 1 with dict dds, advanced one pass, 39677 github, level 12 row 1 with dict copy, advanced one pass, 39677 github, level 12 row 1 with dict load, advanced one pass, 41166 -github, level 12 row 2, advanced one pass, 134180 +github, level 12 row 2, advanced one pass, 134402 github, level 12 row 2 with dict dms, advanced one pass, 39677 github, level 12 row 2 with dict dds, advanced one pass, 39677 github, level 12 row 2 with dict copy, advanced one pass, 39677 github, level 12 row 2 with dict load, advanced one pass, 41166 -github, level 13, advanced one pass, 134064 +github, level 13, advanced one pass, 132878 github, level 13 with dict, advanced one pass, 39900 github, level 13 with dict dms, advanced one pass, 39900 github, level 13 with dict dds, advanced one pass, 39900 github, level 13 with 
dict copy, advanced one pass, 39948 -github, level 13 with dict load, advanced one pass, 42626 -github, level 16, advanced one pass, 134064 +github, level 13 with dict load, advanced one pass, 42624 +github, level 16, advanced one pass, 133209 github, level 16 with dict, advanced one pass, 37577 github, level 16 with dict dms, advanced one pass, 37577 github, level 16 with dict dds, advanced one pass, 37577 github, level 16 with dict copy, advanced one pass, 37568 -github, level 16 with dict load, advanced one pass, 42340 -github, level 19, advanced one pass, 134064 +github, level 16 with dict load, advanced one pass, 42338 +github, level 19, advanced one pass, 132879 github, level 19 with dict, advanced one pass, 37576 github, level 19 with dict dms, advanced one pass, 37576 github, level 19 with dict dds, advanced one pass, 37576 @@ -418,15 +418,15 @@ github, small hash log, advanced github, small chain log, advanced one pass, 136341 github, explicit params, advanced one pass, 137727 github, uncompressed literals, advanced one pass, 165911 -github, uncompressed literals optimal, advanced one pass, 157227 +github, uncompressed literals optimal, advanced one pass, 152667 github, huffman literals, advanced one pass, 142365 github, multithreaded with advanced params, advanced one pass, 165911 -github.tar, level -5, advanced one pass, 52110 -github.tar, level -5 with dict, advanced one pass, 46498 +github.tar, level -5, advanced one pass, 52115 +github.tar, level -5 with dict, advanced one pass, 50974 github.tar, level -3, advanced one pass, 45678 -github.tar, level -3 with dict, advanced one pass, 42177 +github.tar, level -3 with dict, advanced one pass, 44656 github.tar, level -1, advanced one pass, 42560 -github.tar, level -1 with dict, advanced one pass, 41136 +github.tar, level -1 with dict, advanced one pass, 41151 github.tar, level 0, advanced one pass, 38831 github.tar, level 0 with dict, advanced one pass, 37995 github.tar, level 0 with dict dms, advanced one pass, 38003 @@ -434,11 +434,11 @@ github.tar, level 0 with dict dds, advanced github.tar, level 0 with dict copy, advanced one pass, 37995 github.tar, level 0 with dict load, advanced one pass, 37956 github.tar, level 1, advanced one pass, 39200 -github.tar, level 1 with dict, advanced one pass, 38284 +github.tar, level 1 with dict, advanced one pass, 38089 github.tar, level 1 with dict dms, advanced one pass, 38294 github.tar, level 1 with dict dds, advanced one pass, 38294 -github.tar, level 1 with dict copy, advanced one pass, 38284 -github.tar, level 1 with dict load, advanced one pass, 38724 +github.tar, level 1 with dict copy, advanced one pass, 38089 +github.tar, level 1 with dict load, advanced one pass, 38364 github.tar, level 3, advanced one pass, 38831 github.tar, level 3 with dict, advanced one pass, 37995 github.tar, level 3 with dict dms, advanced one pass, 38003 @@ -521,18 +521,18 @@ github.tar, level 13 with dict dms, advanced github.tar, level 13 with dict dds, advanced one pass, 37220 github.tar, level 13 with dict copy, advanced one pass, 37130 github.tar, level 13 with dict load, advanced one pass, 36010 -github.tar, level 16, advanced one pass, 40471 -github.tar, level 16 with dict, advanced one pass, 33378 -github.tar, level 16 with dict dms, advanced one pass, 33213 -github.tar, level 16 with dict dds, advanced one pass, 33213 -github.tar, level 16 with dict copy, advanced one pass, 33378 +github.tar, level 16, advanced one pass, 40466 +github.tar, level 16 with dict, advanced one pass, 33374 +github.tar, 
level 16 with dict dms, advanced one pass, 33206 +github.tar, level 16 with dict dds, advanced one pass, 33206 +github.tar, level 16 with dict copy, advanced one pass, 33374 github.tar, level 16 with dict load, advanced one pass, 39081 -github.tar, level 19, advanced one pass, 32134 -github.tar, level 19 with dict, advanced one pass, 32709 -github.tar, level 19 with dict dms, advanced one pass, 32553 -github.tar, level 19 with dict dds, advanced one pass, 32553 -github.tar, level 19 with dict copy, advanced one pass, 32709 -github.tar, level 19 with dict load, advanced one pass, 32474 +github.tar, level 19, advanced one pass, 32276 +github.tar, level 19 with dict, advanced one pass, 32712 +github.tar, level 19 with dict dms, advanced one pass, 32555 +github.tar, level 19 with dict dds, advanced one pass, 32555 +github.tar, level 19 with dict copy, advanced one pass, 32712 +github.tar, level 19 with dict load, advanced one pass, 32479 github.tar, no source size, advanced one pass, 38831 github.tar, no source size with dict, advanced one pass, 37995 github.tar, long distance mode, advanced one pass, 40252 @@ -546,11 +546,11 @@ github.tar, uncompressed literals, advanced github.tar, uncompressed literals optimal, advanced one pass, 35397 github.tar, huffman literals, advanced one pass, 38853 github.tar, multithreaded with advanced params, advanced one pass, 41525 -silesia, level -5, advanced one pass small out, 6852424 -silesia, level -3, advanced one pass small out, 6503413 -silesia, level -1, advanced one pass small out, 6172178 +silesia, level -5, advanced one pass small out, 6857372 +silesia, level -3, advanced one pass small out, 6503412 +silesia, level -1, advanced one pass small out, 6172202 silesia, level 0, advanced one pass small out, 4842075 -silesia, level 1, advanced one pass small out, 5306426 +silesia, level 1, advanced one pass small out, 5306632 silesia, level 3, advanced one pass small out, 4842075 silesia, level 4, advanced one pass small out, 4779186 silesia, level 5 row 1, advanced one pass small out, 4666323 @@ -566,8 +566,8 @@ silesia, level 11 row 2, advanced silesia, level 12 row 1, advanced one pass small out, 4505046 silesia, level 12 row 2, advanced one pass small out, 4503116 silesia, level 13, advanced one pass small out, 4493990 -silesia, level 16, advanced one pass small out, 4359864 -silesia, level 19, advanced one pass small out, 4296880 +silesia, level 16, advanced one pass small out, 4360041 +silesia, level 19, advanced one pass small out, 4296055 silesia, no source size, advanced one pass small out, 4842075 silesia, long distance mode, advanced one pass small out, 4833710 silesia, multithreaded, advanced one pass small out, 4842075 @@ -578,13 +578,13 @@ silesia, small chain log, advanced silesia, explicit params, advanced one pass small out, 4795432 silesia, uncompressed literals, advanced one pass small out, 5120566 silesia, uncompressed literals optimal, advanced one pass small out, 4319518 -silesia, huffman literals, advanced one pass small out, 5321346 +silesia, huffman literals, advanced one pass small out, 5321369 silesia, multithreaded with advanced params, advanced one pass small out, 5120566 -silesia.tar, level -5, advanced one pass small out, 6853608 -silesia.tar, level -3, advanced one pass small out, 6505969 -silesia.tar, level -1, advanced one pass small out, 6179026 +silesia.tar, level -5, advanced one pass small out, 6861055 +silesia.tar, level -3, advanced one pass small out, 6505483 +silesia.tar, level -1, advanced one pass small out, 
6179047 silesia.tar, level 0, advanced one pass small out, 4854086 -silesia.tar, level 1, advanced one pass small out, 5327373 +silesia.tar, level 1, advanced one pass small out, 5327717 silesia.tar, level 3, advanced one pass small out, 4854086 silesia.tar, level 4, advanced one pass small out, 4791503 silesia.tar, level 5 row 1, advanced one pass small out, 4677740 @@ -600,21 +600,21 @@ silesia.tar, level 11 row 2, advanced silesia.tar, level 12 row 1, advanced one pass small out, 4514049 silesia.tar, level 12 row 2, advanced one pass small out, 4513797 silesia.tar, level 13, advanced one pass small out, 4502956 -silesia.tar, level 16, advanced one pass small out, 4360527 -silesia.tar, level 19, advanced one pass small out, 4267266 +silesia.tar, level 16, advanced one pass small out, 4360546 +silesia.tar, level 19, advanced one pass small out, 4265911 silesia.tar, no source size, advanced one pass small out, 4854086 silesia.tar, long distance mode, advanced one pass small out, 4840452 silesia.tar, multithreaded, advanced one pass small out, 4854160 silesia.tar, multithreaded long distance mode, advanced one pass small out, 4845741 silesia.tar, small window log, advanced one pass small out, 7100655 -silesia.tar, small hash log, advanced one pass small out, 6529231 +silesia.tar, small hash log, advanced one pass small out, 6529206 silesia.tar, small chain log, advanced one pass small out, 4917041 silesia.tar, explicit params, advanced one pass small out, 4806855 silesia.tar, uncompressed literals, advanced one pass small out, 5122473 silesia.tar, uncompressed literals optimal, advanced one pass small out, 4310141 -silesia.tar, huffman literals, advanced one pass small out, 5341685 +silesia.tar, huffman literals, advanced one pass small out, 5341705 silesia.tar, multithreaded with advanced params, advanced one pass small out, 5122567 -github, level -5, advanced one pass small out, 204411 +github, level -5, advanced one pass small out, 204407 github, level -5 with dict, advanced one pass small out, 46718 github, level -3, advanced one pass small out, 193253 github, level -3 with dict, advanced one pass small out, 45395 @@ -630,8 +630,8 @@ github, level 1, advanced github, level 1 with dict, advanced one pass small out, 41682 github, level 1 with dict dms, advanced one pass small out, 41682 github, level 1 with dict dds, advanced one pass small out, 41682 -github, level 1 with dict copy, advanced one pass small out, 41674 -github, level 1 with dict load, advanced one pass small out, 43755 +github, level 1 with dict copy, advanced one pass small out, 41698 +github, level 1 with dict load, advanced one pass small out, 43814 github, level 3, advanced one pass small out, 136332 github, level 3 with dict, advanced one pass small out, 41148 github, level 3 with dict dms, advanced one pass small out, 41148 @@ -688,39 +688,39 @@ github, level 9 with dict dms, advanced github, level 9 with dict dds, advanced one pass small out, 39393 github, level 9 with dict copy, advanced one pass small out, 39398 github, level 9 with dict load, advanced one pass small out, 41710 -github, level 11 row 1, advanced one pass small out, 135119 +github, level 11 row 1, advanced one pass small out, 135367 github, level 11 row 1 with dict dms, advanced one pass small out, 39671 github, level 11 row 1 with dict dds, advanced one pass small out, 39671 github, level 11 row 1 with dict copy, advanced one pass small out, 39651 github, level 11 row 1 with dict load, advanced one pass small out, 41360 -github, level 11 row 2, 
advanced one pass small out, 135119 +github, level 11 row 2, advanced one pass small out, 135367 github, level 11 row 2 with dict dms, advanced one pass small out, 39671 github, level 11 row 2 with dict dds, advanced one pass small out, 39671 github, level 11 row 2 with dict copy, advanced one pass small out, 39651 github, level 11 row 2 with dict load, advanced one pass small out, 41360 -github, level 12 row 1, advanced one pass small out, 134180 +github, level 12 row 1, advanced one pass small out, 134402 github, level 12 row 1 with dict dms, advanced one pass small out, 39677 github, level 12 row 1 with dict dds, advanced one pass small out, 39677 github, level 12 row 1 with dict copy, advanced one pass small out, 39677 github, level 12 row 1 with dict load, advanced one pass small out, 41166 -github, level 12 row 2, advanced one pass small out, 134180 +github, level 12 row 2, advanced one pass small out, 134402 github, level 12 row 2 with dict dms, advanced one pass small out, 39677 github, level 12 row 2 with dict dds, advanced one pass small out, 39677 github, level 12 row 2 with dict copy, advanced one pass small out, 39677 github, level 12 row 2 with dict load, advanced one pass small out, 41166 -github, level 13, advanced one pass small out, 134064 +github, level 13, advanced one pass small out, 132878 github, level 13 with dict, advanced one pass small out, 39900 github, level 13 with dict dms, advanced one pass small out, 39900 github, level 13 with dict dds, advanced one pass small out, 39900 github, level 13 with dict copy, advanced one pass small out, 39948 -github, level 13 with dict load, advanced one pass small out, 42626 -github, level 16, advanced one pass small out, 134064 +github, level 13 with dict load, advanced one pass small out, 42624 +github, level 16, advanced one pass small out, 133209 github, level 16 with dict, advanced one pass small out, 37577 github, level 16 with dict dms, advanced one pass small out, 37577 github, level 16 with dict dds, advanced one pass small out, 37577 github, level 16 with dict copy, advanced one pass small out, 37568 -github, level 16 with dict load, advanced one pass small out, 42340 -github, level 19, advanced one pass small out, 134064 +github, level 16 with dict load, advanced one pass small out, 42338 +github, level 19, advanced one pass small out, 132879 github, level 19 with dict, advanced one pass small out, 37576 github, level 19 with dict dms, advanced one pass small out, 37576 github, level 19 with dict dds, advanced one pass small out, 37576 @@ -736,15 +736,15 @@ github, small hash log, advanced github, small chain log, advanced one pass small out, 136341 github, explicit params, advanced one pass small out, 137727 github, uncompressed literals, advanced one pass small out, 165911 -github, uncompressed literals optimal, advanced one pass small out, 157227 +github, uncompressed literals optimal, advanced one pass small out, 152667 github, huffman literals, advanced one pass small out, 142365 github, multithreaded with advanced params, advanced one pass small out, 165911 -github.tar, level -5, advanced one pass small out, 52110 -github.tar, level -5 with dict, advanced one pass small out, 46498 +github.tar, level -5, advanced one pass small out, 52115 +github.tar, level -5 with dict, advanced one pass small out, 50974 github.tar, level -3, advanced one pass small out, 45678 -github.tar, level -3 with dict, advanced one pass small out, 42177 +github.tar, level -3 with dict, advanced one pass small out, 44656 github.tar, 
level -1, advanced one pass small out, 42560 -github.tar, level -1 with dict, advanced one pass small out, 41136 +github.tar, level -1 with dict, advanced one pass small out, 41151 github.tar, level 0, advanced one pass small out, 38831 github.tar, level 0 with dict, advanced one pass small out, 37995 github.tar, level 0 with dict dms, advanced one pass small out, 38003 @@ -752,11 +752,11 @@ github.tar, level 0 with dict dds, advanced github.tar, level 0 with dict copy, advanced one pass small out, 37995 github.tar, level 0 with dict load, advanced one pass small out, 37956 github.tar, level 1, advanced one pass small out, 39200 -github.tar, level 1 with dict, advanced one pass small out, 38284 +github.tar, level 1 with dict, advanced one pass small out, 38089 github.tar, level 1 with dict dms, advanced one pass small out, 38294 github.tar, level 1 with dict dds, advanced one pass small out, 38294 -github.tar, level 1 with dict copy, advanced one pass small out, 38284 -github.tar, level 1 with dict load, advanced one pass small out, 38724 +github.tar, level 1 with dict copy, advanced one pass small out, 38089 +github.tar, level 1 with dict load, advanced one pass small out, 38364 github.tar, level 3, advanced one pass small out, 38831 github.tar, level 3 with dict, advanced one pass small out, 37995 github.tar, level 3 with dict dms, advanced one pass small out, 38003 @@ -839,18 +839,18 @@ github.tar, level 13 with dict dms, advanced github.tar, level 13 with dict dds, advanced one pass small out, 37220 github.tar, level 13 with dict copy, advanced one pass small out, 37130 github.tar, level 13 with dict load, advanced one pass small out, 36010 -github.tar, level 16, advanced one pass small out, 40471 -github.tar, level 16 with dict, advanced one pass small out, 33378 -github.tar, level 16 with dict dms, advanced one pass small out, 33213 -github.tar, level 16 with dict dds, advanced one pass small out, 33213 -github.tar, level 16 with dict copy, advanced one pass small out, 33378 +github.tar, level 16, advanced one pass small out, 40466 +github.tar, level 16 with dict, advanced one pass small out, 33374 +github.tar, level 16 with dict dms, advanced one pass small out, 33206 +github.tar, level 16 with dict dds, advanced one pass small out, 33206 +github.tar, level 16 with dict copy, advanced one pass small out, 33374 github.tar, level 16 with dict load, advanced one pass small out, 39081 -github.tar, level 19, advanced one pass small out, 32134 -github.tar, level 19 with dict, advanced one pass small out, 32709 -github.tar, level 19 with dict dms, advanced one pass small out, 32553 -github.tar, level 19 with dict dds, advanced one pass small out, 32553 -github.tar, level 19 with dict copy, advanced one pass small out, 32709 -github.tar, level 19 with dict load, advanced one pass small out, 32474 +github.tar, level 19, advanced one pass small out, 32276 +github.tar, level 19 with dict, advanced one pass small out, 32712 +github.tar, level 19 with dict dms, advanced one pass small out, 32555 +github.tar, level 19 with dict dds, advanced one pass small out, 32555 +github.tar, level 19 with dict copy, advanced one pass small out, 32712 +github.tar, level 19 with dict load, advanced one pass small out, 32479 github.tar, no source size, advanced one pass small out, 38831 github.tar, no source size with dict, advanced one pass small out, 37995 github.tar, long distance mode, advanced one pass small out, 40252 @@ -864,11 +864,11 @@ github.tar, uncompressed literals, advanced github.tar, 
uncompressed literals optimal, advanced one pass small out, 35397 github.tar, huffman literals, advanced one pass small out, 38853 github.tar, multithreaded with advanced params, advanced one pass small out, 41525 -silesia, level -5, advanced streaming, 6963781 -silesia, level -3, advanced streaming, 6610376 -silesia, level -1, advanced streaming, 6179294 +silesia, level -5, advanced streaming, 6854744 +silesia, level -3, advanced streaming, 6503319 +silesia, level -1, advanced streaming, 6172207 silesia, level 0, advanced streaming, 4842075 -silesia, level 1, advanced streaming, 5310178 +silesia, level 1, advanced streaming, 5306388 silesia, level 3, advanced streaming, 4842075 silesia, level 4, advanced streaming, 4779186 silesia, level 5 row 1, advanced streaming, 4666323 @@ -884,8 +884,8 @@ silesia, level 11 row 2, advanced silesia, level 12 row 1, advanced streaming, 4505046 silesia, level 12 row 2, advanced streaming, 4503116 silesia, level 13, advanced streaming, 4493990 -silesia, level 16, advanced streaming, 4359864 -silesia, level 19, advanced streaming, 4296880 +silesia, level 16, advanced streaming, 4360041 +silesia, level 19, advanced streaming, 4296055 silesia, no source size, advanced streaming, 4842039 silesia, long distance mode, advanced streaming, 4833710 silesia, multithreaded, advanced streaming, 4842075 @@ -896,13 +896,13 @@ silesia, small chain log, advanced silesia, explicit params, advanced streaming, 4795452 silesia, uncompressed literals, advanced streaming, 5120566 silesia, uncompressed literals optimal, advanced streaming, 4319518 -silesia, huffman literals, advanced streaming, 5327881 +silesia, huffman literals, advanced streaming, 5321370 silesia, multithreaded with advanced params, advanced streaming, 5120566 -silesia.tar, level -5, advanced streaming, 7043687 -silesia.tar, level -3, advanced streaming, 6671317 -silesia.tar, level -1, advanced streaming, 6187457 +silesia.tar, level -5, advanced streaming, 6856523 +silesia.tar, level -3, advanced streaming, 6505954 +silesia.tar, level -1, advanced streaming, 6179056 silesia.tar, level 0, advanced streaming, 4859271 -silesia.tar, level 1, advanced streaming, 5333896 +silesia.tar, level 1, advanced streaming, 5327708 silesia.tar, level 3, advanced streaming, 4859271 silesia.tar, level 4, advanced streaming, 4797470 silesia.tar, level 5 row 1, advanced streaming, 4677748 @@ -918,21 +918,21 @@ silesia.tar, level 11 row 2, advanced silesia.tar, level 12 row 1, advanced streaming, 4514049 silesia.tar, level 12 row 2, advanced streaming, 4513797 silesia.tar, level 13, advanced streaming, 4502956 -silesia.tar, level 16, advanced streaming, 4360527 -silesia.tar, level 19, advanced streaming, 4267266 +silesia.tar, level 16, advanced streaming, 4360546 +silesia.tar, level 19, advanced streaming, 4265911 silesia.tar, no source size, advanced streaming, 4859267 silesia.tar, long distance mode, advanced streaming, 4840452 silesia.tar, multithreaded, advanced streaming, 4854160 silesia.tar, multithreaded long distance mode, advanced streaming, 4845741 silesia.tar, small window log, advanced streaming, 7117559 -silesia.tar, small hash log, advanced streaming, 6529234 +silesia.tar, small hash log, advanced streaming, 6529209 silesia.tar, small chain log, advanced streaming, 4917021 silesia.tar, explicit params, advanced streaming, 4806873 silesia.tar, uncompressed literals, advanced streaming, 5127423 silesia.tar, uncompressed literals optimal, advanced streaming, 4310141 -silesia.tar, huffman literals, advanced streaming, 
5349624 +silesia.tar, huffman literals, advanced streaming, 5341712 silesia.tar, multithreaded with advanced params, advanced streaming, 5122567 -github, level -5, advanced streaming, 204411 +github, level -5, advanced streaming, 204407 github, level -5 with dict, advanced streaming, 46718 github, level -3, advanced streaming, 193253 github, level -3 with dict, advanced streaming, 45395 @@ -948,8 +948,8 @@ github, level 1, advanced github, level 1 with dict, advanced streaming, 41682 github, level 1 with dict dms, advanced streaming, 41682 github, level 1 with dict dds, advanced streaming, 41682 -github, level 1 with dict copy, advanced streaming, 41674 -github, level 1 with dict load, advanced streaming, 43755 +github, level 1 with dict copy, advanced streaming, 41698 +github, level 1 with dict load, advanced streaming, 43814 github, level 3, advanced streaming, 136332 github, level 3 with dict, advanced streaming, 41148 github, level 3 with dict dms, advanced streaming, 41148 @@ -1006,39 +1006,39 @@ github, level 9 with dict dms, advanced github, level 9 with dict dds, advanced streaming, 39393 github, level 9 with dict copy, advanced streaming, 39398 github, level 9 with dict load, advanced streaming, 41710 -github, level 11 row 1, advanced streaming, 135119 +github, level 11 row 1, advanced streaming, 135367 github, level 11 row 1 with dict dms, advanced streaming, 39671 github, level 11 row 1 with dict dds, advanced streaming, 39671 github, level 11 row 1 with dict copy, advanced streaming, 39651 github, level 11 row 1 with dict load, advanced streaming, 41360 -github, level 11 row 2, advanced streaming, 135119 +github, level 11 row 2, advanced streaming, 135367 github, level 11 row 2 with dict dms, advanced streaming, 39671 github, level 11 row 2 with dict dds, advanced streaming, 39671 github, level 11 row 2 with dict copy, advanced streaming, 39651 github, level 11 row 2 with dict load, advanced streaming, 41360 -github, level 12 row 1, advanced streaming, 134180 +github, level 12 row 1, advanced streaming, 134402 github, level 12 row 1 with dict dms, advanced streaming, 39677 github, level 12 row 1 with dict dds, advanced streaming, 39677 github, level 12 row 1 with dict copy, advanced streaming, 39677 github, level 12 row 1 with dict load, advanced streaming, 41166 -github, level 12 row 2, advanced streaming, 134180 +github, level 12 row 2, advanced streaming, 134402 github, level 12 row 2 with dict dms, advanced streaming, 39677 github, level 12 row 2 with dict dds, advanced streaming, 39677 github, level 12 row 2 with dict copy, advanced streaming, 39677 github, level 12 row 2 with dict load, advanced streaming, 41166 -github, level 13, advanced streaming, 134064 +github, level 13, advanced streaming, 132878 github, level 13 with dict, advanced streaming, 39900 github, level 13 with dict dms, advanced streaming, 39900 github, level 13 with dict dds, advanced streaming, 39900 github, level 13 with dict copy, advanced streaming, 39948 -github, level 13 with dict load, advanced streaming, 42626 -github, level 16, advanced streaming, 134064 +github, level 13 with dict load, advanced streaming, 42624 +github, level 16, advanced streaming, 133209 github, level 16 with dict, advanced streaming, 37577 github, level 16 with dict dms, advanced streaming, 37577 github, level 16 with dict dds, advanced streaming, 37577 github, level 16 with dict copy, advanced streaming, 37568 -github, level 16 with dict load, advanced streaming, 42340 -github, level 19, advanced streaming, 134064 +github, 
level 16 with dict load, advanced streaming, 42338 +github, level 19, advanced streaming, 132879 github, level 19 with dict, advanced streaming, 37576 github, level 19 with dict dms, advanced streaming, 37576 github, level 19 with dict dds, advanced streaming, 37576 @@ -1054,27 +1054,27 @@ github, small hash log, advanced github, small chain log, advanced streaming, 136341 github, explicit params, advanced streaming, 137727 github, uncompressed literals, advanced streaming, 165911 -github, uncompressed literals optimal, advanced streaming, 157227 +github, uncompressed literals optimal, advanced streaming, 152667 github, huffman literals, advanced streaming, 142365 github, multithreaded with advanced params, advanced streaming, 165911 -github.tar, level -5, advanced streaming, 51420 -github.tar, level -5 with dict, advanced streaming, 45495 -github.tar, level -3, advanced streaming, 45077 -github.tar, level -3 with dict, advanced streaming, 41627 -github.tar, level -1, advanced streaming, 42536 -github.tar, level -1 with dict, advanced streaming, 41198 +github.tar, level -5, advanced streaming, 52152 +github.tar, level -5 with dict, advanced streaming, 51045 +github.tar, level -3, advanced streaming, 45678 +github.tar, level -3 with dict, advanced streaming, 44656 +github.tar, level -1, advanced streaming, 42560 +github.tar, level -1 with dict, advanced streaming, 41151 github.tar, level 0, advanced streaming, 38831 github.tar, level 0 with dict, advanced streaming, 37995 github.tar, level 0 with dict dms, advanced streaming, 38003 github.tar, level 0 with dict dds, advanced streaming, 38003 github.tar, level 0 with dict copy, advanced streaming, 37995 github.tar, level 0 with dict load, advanced streaming, 37956 -github.tar, level 1, advanced streaming, 39270 -github.tar, level 1 with dict, advanced streaming, 38316 -github.tar, level 1 with dict dms, advanced streaming, 38326 -github.tar, level 1 with dict dds, advanced streaming, 38326 -github.tar, level 1 with dict copy, advanced streaming, 38316 -github.tar, level 1 with dict load, advanced streaming, 38761 +github.tar, level 1, advanced streaming, 39200 +github.tar, level 1 with dict, advanced streaming, 38089 +github.tar, level 1 with dict dms, advanced streaming, 38294 +github.tar, level 1 with dict dds, advanced streaming, 38294 +github.tar, level 1 with dict copy, advanced streaming, 38089 +github.tar, level 1 with dict load, advanced streaming, 38364 github.tar, level 3, advanced streaming, 38831 github.tar, level 3 with dict, advanced streaming, 37995 github.tar, level 3 with dict dms, advanced streaming, 38003 @@ -1157,18 +1157,18 @@ github.tar, level 13 with dict dms, advanced github.tar, level 13 with dict dds, advanced streaming, 37220 github.tar, level 13 with dict copy, advanced streaming, 37130 github.tar, level 13 with dict load, advanced streaming, 36010 -github.tar, level 16, advanced streaming, 40471 -github.tar, level 16 with dict, advanced streaming, 33378 -github.tar, level 16 with dict dms, advanced streaming, 33213 -github.tar, level 16 with dict dds, advanced streaming, 33213 -github.tar, level 16 with dict copy, advanced streaming, 33378 +github.tar, level 16, advanced streaming, 40466 +github.tar, level 16 with dict, advanced streaming, 33374 +github.tar, level 16 with dict dms, advanced streaming, 33206 +github.tar, level 16 with dict dds, advanced streaming, 33206 +github.tar, level 16 with dict copy, advanced streaming, 33374 github.tar, level 16 with dict load, advanced streaming, 39081 -github.tar, level 
19, advanced streaming, 32134 -github.tar, level 19 with dict, advanced streaming, 32709 -github.tar, level 19 with dict dms, advanced streaming, 32553 -github.tar, level 19 with dict dds, advanced streaming, 32553 -github.tar, level 19 with dict copy, advanced streaming, 32709 -github.tar, level 19 with dict load, advanced streaming, 32474 +github.tar, level 19, advanced streaming, 32276 +github.tar, level 19 with dict, advanced streaming, 32712 +github.tar, level 19 with dict dms, advanced streaming, 32555 +github.tar, level 19 with dict dds, advanced streaming, 32555 +github.tar, level 19 with dict copy, advanced streaming, 32712 +github.tar, level 19 with dict load, advanced streaming, 32479 github.tar, no source size, advanced streaming, 38828 github.tar, no source size with dict, advanced streaming, 38000 github.tar, long distance mode, advanced streaming, 40252 @@ -1180,13 +1180,13 @@ github.tar, small chain log, advanced github.tar, explicit params, advanced streaming, 41385 github.tar, uncompressed literals, advanced streaming, 41525 github.tar, uncompressed literals optimal, advanced streaming, 35397 -github.tar, huffman literals, advanced streaming, 38874 +github.tar, huffman literals, advanced streaming, 38853 github.tar, multithreaded with advanced params, advanced streaming, 41525 -silesia, level -5, old streaming, 6963781 -silesia, level -3, old streaming, 6610376 -silesia, level -1, old streaming, 6179294 +silesia, level -5, old streaming, 6854744 +silesia, level -3, old streaming, 6503319 +silesia, level -1, old streaming, 6172207 silesia, level 0, old streaming, 4842075 -silesia, level 1, old streaming, 5310178 +silesia, level 1, old streaming, 5306388 silesia, level 3, old streaming, 4842075 silesia, level 4, old streaming, 4779186 silesia, level 5, old streaming, 4666323 @@ -1194,17 +1194,17 @@ silesia, level 6, old stre silesia, level 7, old streaming, 4566984 silesia, level 9, old streaming, 4543018 silesia, level 13, old streaming, 4493990 -silesia, level 16, old streaming, 4359864 -silesia, level 19, old streaming, 4296880 +silesia, level 16, old streaming, 4360041 +silesia, level 19, old streaming, 4296055 silesia, no source size, old streaming, 4842039 silesia, uncompressed literals, old streaming, 4842075 -silesia, uncompressed literals optimal, old streaming, 4296880 -silesia, huffman literals, old streaming, 6179294 -silesia.tar, level -5, old streaming, 7043687 -silesia.tar, level -3, old streaming, 6671317 -silesia.tar, level -1, old streaming, 6187457 +silesia, uncompressed literals optimal, old streaming, 4296055 +silesia, huffman literals, old streaming, 6172207 +silesia.tar, level -5, old streaming, 6856523 +silesia.tar, level -3, old streaming, 6505954 +silesia.tar, level -1, old streaming, 6179056 silesia.tar, level 0, old streaming, 4859271 -silesia.tar, level 1, old streaming, 5333896 +silesia.tar, level 1, old streaming, 5327708 silesia.tar, level 3, old streaming, 4859271 silesia.tar, level 4, old streaming, 4797470 silesia.tar, level 5, old streaming, 4677748 @@ -1212,13 +1212,13 @@ silesia.tar, level 6, old stre silesia.tar, level 7, old streaming, 4576664 silesia.tar, level 9, old streaming, 4552900 silesia.tar, level 13, old streaming, 4502956 -silesia.tar, level 16, old streaming, 4360527 -silesia.tar, level 19, old streaming, 4267266 +silesia.tar, level 16, old streaming, 4360546 +silesia.tar, level 19, old streaming, 4265911 silesia.tar, no source size, old streaming, 4859267 silesia.tar, uncompressed literals, old streaming, 4859271 
-silesia.tar, uncompressed literals optimal, old streaming, 4267266 -silesia.tar, huffman literals, old streaming, 6187457 -github, level -5, old streaming, 204411 +silesia.tar, uncompressed literals optimal, old streaming, 4265911 +silesia.tar, huffman literals, old streaming, 6179056 +github, level -5, old streaming, 204407 github, level -5 with dict, old streaming, 46718 github, level -3, old streaming, 193253 github, level -3 with dict, old streaming, 45395 @@ -1240,27 +1240,27 @@ github, level 7, old stre github, level 7 with dict, old streaming, 38758 github, level 9, old streaming, 135122 github, level 9 with dict, old streaming, 39437 -github, level 13, old streaming, 134064 +github, level 13, old streaming, 132878 github, level 13 with dict, old streaming, 39900 -github, level 16, old streaming, 134064 +github, level 16, old streaming, 133209 github, level 16 with dict, old streaming, 37577 -github, level 19, old streaming, 134064 +github, level 19, old streaming, 132879 github, level 19 with dict, old streaming, 37576 github, no source size, old streaming, 140599 github, no source size with dict, old streaming, 40654 github, uncompressed literals, old streaming, 136332 -github, uncompressed literals optimal, old streaming, 134064 +github, uncompressed literals optimal, old streaming, 132879 github, huffman literals, old streaming, 175468 -github.tar, level -5, old streaming, 51420 -github.tar, level -5 with dict, old streaming, 45495 -github.tar, level -3, old streaming, 45077 -github.tar, level -3 with dict, old streaming, 41627 -github.tar, level -1, old streaming, 42536 -github.tar, level -1 with dict, old streaming, 41198 +github.tar, level -5, old streaming, 52152 +github.tar, level -5 with dict, old streaming, 51045 +github.tar, level -3, old streaming, 45678 +github.tar, level -3 with dict, old streaming, 44656 +github.tar, level -1, old streaming, 42560 +github.tar, level -1 with dict, old streaming, 41151 github.tar, level 0, old streaming, 38831 github.tar, level 0 with dict, old streaming, 37995 -github.tar, level 1, old streaming, 39270 -github.tar, level 1 with dict, old streaming, 38316 +github.tar, level 1, old streaming, 39200 +github.tar, level 1 with dict, old streaming, 38089 github.tar, level 3, old streaming, 38831 github.tar, level 3 with dict, old streaming, 37995 github.tar, level 4, old streaming, 38893 @@ -1275,20 +1275,20 @@ github.tar, level 9, old stre github.tar, level 9 with dict, old streaming, 36484 github.tar, level 13, old streaming, 35501 github.tar, level 13 with dict, old streaming, 37130 -github.tar, level 16, old streaming, 40471 -github.tar, level 16 with dict, old streaming, 33378 -github.tar, level 19, old streaming, 32134 -github.tar, level 19 with dict, old streaming, 32709 +github.tar, level 16, old streaming, 40466 +github.tar, level 16 with dict, old streaming, 33374 +github.tar, level 19, old streaming, 32276 +github.tar, level 19 with dict, old streaming, 32712 github.tar, no source size, old streaming, 38828 github.tar, no source size with dict, old streaming, 38000 github.tar, uncompressed literals, old streaming, 38831 -github.tar, uncompressed literals optimal, old streaming, 32134 -github.tar, huffman literals, old streaming, 42536 -silesia, level -5, old streaming advanced, 6963781 -silesia, level -3, old streaming advanced, 6610376 -silesia, level -1, old streaming advanced, 6179294 +github.tar, uncompressed literals optimal, old streaming, 32276 +github.tar, huffman literals, old streaming, 42560 +silesia, level -5, old 
streaming advanced, 6854744 +silesia, level -3, old streaming advanced, 6503319 +silesia, level -1, old streaming advanced, 6172207 silesia, level 0, old streaming advanced, 4842075 -silesia, level 1, old streaming advanced, 5310178 +silesia, level 1, old streaming advanced, 5306388 silesia, level 3, old streaming advanced, 4842075 silesia, level 4, old streaming advanced, 4779186 silesia, level 5, old streaming advanced, 4666323 @@ -1296,8 +1296,8 @@ silesia, level 6, old stre silesia, level 7, old streaming advanced, 4566984 silesia, level 9, old streaming advanced, 4543018 silesia, level 13, old streaming advanced, 4493990 -silesia, level 16, old streaming advanced, 4359864 -silesia, level 19, old streaming advanced, 4296880 +silesia, level 16, old streaming advanced, 4360041 +silesia, level 19, old streaming advanced, 4296055 silesia, no source size, old streaming advanced, 4842039 silesia, long distance mode, old streaming advanced, 4842075 silesia, multithreaded, old streaming advanced, 4842075 @@ -1307,14 +1307,14 @@ silesia, small hash log, old stre silesia, small chain log, old streaming advanced, 4912197 silesia, explicit params, old streaming advanced, 4795452 silesia, uncompressed literals, old streaming advanced, 4842075 -silesia, uncompressed literals optimal, old streaming advanced, 4296880 -silesia, huffman literals, old streaming advanced, 6179294 +silesia, uncompressed literals optimal, old streaming advanced, 4296055 +silesia, huffman literals, old streaming advanced, 6172207 silesia, multithreaded with advanced params, old streaming advanced, 4842075 -silesia.tar, level -5, old streaming advanced, 7043687 -silesia.tar, level -3, old streaming advanced, 6671317 -silesia.tar, level -1, old streaming advanced, 6187457 +silesia.tar, level -5, old streaming advanced, 6856523 +silesia.tar, level -3, old streaming advanced, 6505954 +silesia.tar, level -1, old streaming advanced, 6179056 silesia.tar, level 0, old streaming advanced, 4859271 -silesia.tar, level 1, old streaming advanced, 5333896 +silesia.tar, level 1, old streaming advanced, 5327708 silesia.tar, level 3, old streaming advanced, 4859271 silesia.tar, level 4, old streaming advanced, 4797470 silesia.tar, level 5, old streaming advanced, 4677748 @@ -1322,19 +1322,19 @@ silesia.tar, level 6, old stre silesia.tar, level 7, old streaming advanced, 4576664 silesia.tar, level 9, old streaming advanced, 4552900 silesia.tar, level 13, old streaming advanced, 4502956 -silesia.tar, level 16, old streaming advanced, 4360527 -silesia.tar, level 19, old streaming advanced, 4267266 +silesia.tar, level 16, old streaming advanced, 4360546 +silesia.tar, level 19, old streaming advanced, 4265911 silesia.tar, no source size, old streaming advanced, 4859267 silesia.tar, long distance mode, old streaming advanced, 4859271 silesia.tar, multithreaded, old streaming advanced, 4859271 silesia.tar, multithreaded long distance mode, old streaming advanced, 4859271 silesia.tar, small window log, old streaming advanced, 7117562 -silesia.tar, small hash log, old streaming advanced, 6529234 +silesia.tar, small hash log, old streaming advanced, 6529209 silesia.tar, small chain log, old streaming advanced, 4917021 silesia.tar, explicit params, old streaming advanced, 4806873 silesia.tar, uncompressed literals, old streaming advanced, 4859271 -silesia.tar, uncompressed literals optimal, old streaming advanced, 4267266 -silesia.tar, huffman literals, old streaming advanced, 6187457 +silesia.tar, uncompressed literals optimal, old streaming advanced, 
4265911 +silesia.tar, huffman literals, old streaming advanced, 6179056 silesia.tar, multithreaded with advanced params, old streaming advanced, 4859271 github, level -5, old streaming advanced, 213265 github, level -5 with dict, old streaming advanced, 49562 @@ -1360,9 +1360,9 @@ github, level 9, old stre github, level 9 with dict, old streaming advanced, 38981 github, level 13, old streaming advanced, 138676 github, level 13 with dict, old streaming advanced, 39725 -github, level 16, old streaming advanced, 138676 +github, level 16, old streaming advanced, 138575 github, level 16 with dict, old streaming advanced, 40789 -github, level 19, old streaming advanced, 134064 +github, level 19, old streaming advanced, 132879 github, level 19 with dict, old streaming advanced, 37576 github, no source size, old streaming advanced, 140599 github, no source size with dict, old streaming advanced, 40608 @@ -1374,19 +1374,19 @@ github, small hash log, old stre github, small chain log, old streaming advanced, 139275 github, explicit params, old streaming advanced, 140937 github, uncompressed literals, old streaming advanced, 141104 -github, uncompressed literals optimal, old streaming advanced, 134064 +github, uncompressed literals optimal, old streaming advanced, 132879 github, huffman literals, old streaming advanced, 181107 github, multithreaded with advanced params, old streaming advanced, 141104 -github.tar, level -5, old streaming advanced, 51420 -github.tar, level -5 with dict, old streaming advanced, 46091 -github.tar, level -3, old streaming advanced, 45077 -github.tar, level -3 with dict, old streaming advanced, 42222 -github.tar, level -1, old streaming advanced, 42536 -github.tar, level -1 with dict, old streaming advanced, 41494 +github.tar, level -5, old streaming advanced, 52152 +github.tar, level -5 with dict, old streaming advanced, 50988 +github.tar, level -3, old streaming advanced, 45678 +github.tar, level -3 with dict, old streaming advanced, 44729 +github.tar, level -1, old streaming advanced, 42560 +github.tar, level -1 with dict, old streaming advanced, 41589 github.tar, level 0, old streaming advanced, 38831 github.tar, level 0 with dict, old streaming advanced, 38013 -github.tar, level 1, old streaming advanced, 39270 -github.tar, level 1 with dict, old streaming advanced, 38934 +github.tar, level 1, old streaming advanced, 39200 +github.tar, level 1 with dict, old streaming advanced, 38359 github.tar, level 3, old streaming advanced, 38831 github.tar, level 3 with dict, old streaming advanced, 38013 github.tar, level 4, old streaming advanced, 38893 @@ -1401,10 +1401,10 @@ github.tar, level 9, old stre github.tar, level 9 with dict, old streaming advanced, 36312 github.tar, level 13, old streaming advanced, 35501 github.tar, level 13 with dict, old streaming advanced, 35807 -github.tar, level 16, old streaming advanced, 40471 +github.tar, level 16, old streaming advanced, 40466 github.tar, level 16 with dict, old streaming advanced, 38578 -github.tar, level 19, old streaming advanced, 32134 -github.tar, level 19 with dict, old streaming advanced, 32702 +github.tar, level 19, old streaming advanced, 32276 +github.tar, level 19 with dict, old streaming advanced, 32704 github.tar, no source size, old streaming advanced, 38828 github.tar, no source size with dict, old streaming advanced, 38015 github.tar, long distance mode, old streaming advanced, 38831 @@ -1415,8 +1415,8 @@ github.tar, small hash log, old stre github.tar, small chain log, old streaming advanced, 41669 
github.tar, explicit params, old streaming advanced, 41385 github.tar, uncompressed literals, old streaming advanced, 38831 -github.tar, uncompressed literals optimal, old streaming advanced, 32134 -github.tar, huffman literals, old streaming advanced, 42536 +github.tar, uncompressed literals optimal, old streaming advanced, 32276 +github.tar, huffman literals, old streaming advanced, 42560 github.tar, multithreaded with advanced params, old streaming advanced, 38831 github, level -5 with dict, old streaming cdict, 46718 github, level -3 with dict, old streaming cdict, 45395 @@ -1433,11 +1433,11 @@ github, level 13 with dict, old stre github, level 16 with dict, old streaming cdict, 37577 github, level 19 with dict, old streaming cdict, 37576 github, no source size with dict, old streaming cdict, 40654 -github.tar, level -5 with dict, old streaming cdict, 46276 -github.tar, level -3 with dict, old streaming cdict, 42354 -github.tar, level -1 with dict, old streaming cdict, 41662 +github.tar, level -5 with dict, old streaming cdict, 51191 +github.tar, level -3 with dict, old streaming cdict, 44821 +github.tar, level -1 with dict, old streaming cdict, 41775 github.tar, level 0 with dict, old streaming cdict, 37956 -github.tar, level 1 with dict, old streaming cdict, 38761 +github.tar, level 1 with dict, old streaming cdict, 38364 github.tar, level 3 with dict, old streaming cdict, 37956 github.tar, level 4 with dict, old streaming cdict, 37927 github.tar, level 5 with dict, old streaming cdict, 38999 @@ -1446,7 +1446,7 @@ github.tar, level 7 with dict, old stre github.tar, level 9 with dict, old streaming cdict, 36401 github.tar, level 13 with dict, old streaming cdict, 36010 github.tar, level 16 with dict, old streaming cdict, 39081 -github.tar, level 19 with dict, old streaming cdict, 32474 +github.tar, level 19 with dict, old streaming cdict, 32479 github.tar, no source size with dict, old streaming cdict, 38000 github, level -5 with dict, old streaming advanced cdict, 49562 github, level -3 with dict, old streaming advanced cdict, 44956 @@ -1463,11 +1463,11 @@ github, level 13 with dict, old stre github, level 16 with dict, old streaming advanced cdict, 40789 github, level 19 with dict, old streaming advanced cdict, 37576 github, no source size with dict, old streaming advanced cdict, 40608 -github.tar, level -5 with dict, old streaming advanced cdict, 44307 -github.tar, level -3 with dict, old streaming advanced cdict, 41359 -github.tar, level -1 with dict, old streaming advanced cdict, 41322 +github.tar, level -5 with dict, old streaming advanced cdict, 50854 +github.tar, level -3 with dict, old streaming advanced cdict, 44571 +github.tar, level -1 with dict, old streaming advanced cdict, 41477 github.tar, level 0 with dict, old streaming advanced cdict, 38013 -github.tar, level 1 with dict, old streaming advanced cdict, 39002 +github.tar, level 1 with dict, old streaming advanced cdict, 38168 github.tar, level 3 with dict, old streaming advanced cdict, 38013 github.tar, level 4 with dict, old streaming advanced cdict, 38063 github.tar, level 5 with dict, old streaming advanced cdict, 38997 @@ -1476,5 +1476,5 @@ github.tar, level 7 with dict, old stre github.tar, level 9 with dict, old streaming advanced cdict, 36312 github.tar, level 13 with dict, old streaming advanced cdict, 35807 github.tar, level 16 with dict, old streaming advanced cdict, 38578 -github.tar, level 19 with dict, old streaming advanced cdict, 32702 +github.tar, level 19 with dict, old streaming advanced cdict, 32704 
github.tar, no source size with dict, old streaming advanced cdict, 38015 diff --git a/tests/regression/test.c b/tests/regression/test.c index 1de6be86ca3..07600be57e7 100644 --- a/tests/regression/test.c +++ b/tests/regression/test.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/roundTripCrash.c b/tests/roundTripCrash.c index 9aa208cfff1..77411cd4932 100644 --- a/tests/roundTripCrash.c +++ b/tests/roundTripCrash.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/seqgen.c b/tests/seqgen.c index 1e340c8511c..0d8a766c82e 100644 --- a/tests/seqgen.c +++ b/tests/seqgen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/seqgen.h b/tests/seqgen.h index cea3f552b73..df17398efed 100644 --- a/tests/seqgen.h +++ b/tests/seqgen.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/tests/test-license.py b/tests/test-license.py index f27cb677431..d54c16419e9 100755 --- a/tests/test-license.py +++ b/tests/test-license.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -83,8 +83,8 @@ def valid_copyright(lines): continue if "present" in line: return (False, f"Copyright line '{line}' contains 'present'!") - if "Facebook, Inc" not in line: - return (False, f"Copyright line '{line}' does not contain 'Facebook, Inc'") + if "Meta Platforms, Inc" not in line: + return (False, f"Copyright line '{line}' does not contain 'Meta Platforms, Inc'") year = YEAR_REGEX.search(line) if year is not None: return (False, f"Copyright line '{line}' contains {year.group(0)}; it should be yearless") diff --git a/tests/test-zstd-versions.py b/tests/test-zstd-versions.py index baca251f525..1bcf39e2b25 100755 --- a/tests/test-zstd-versions.py +++ b/tests/test-zstd-versions.py @@ -2,7 +2,7 @@ """Test zstd interoperability between versions""" # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -23,14 +23,25 @@ repo_url = 'https://github.com/facebook/zstd.git' tmp_dir_name = 'tests/versionsTest' make_cmd = 'make' -make_args = ['-j','CFLAGS=-O1'] +make_args = ['-j','CFLAGS=-O0'] git_cmd = 'git' test_dat_src = 'README.md' test_dat = 'test_dat' head = 'vdevel' dict_source = 'dict_source' -dict_files = './zstd/programs/*.c ./zstd/lib/common/*.c ./zstd/lib/compress/*.c ./zstd/lib/decompress/*.c ./zstd/lib/dictBuilder/*.c ./zstd/lib/legacy/*.c ' -dict_files += './zstd/programs/*.h ./zstd/lib/common/*.h ./zstd/lib/compress/*.h ./zstd/lib/dictBuilder/*.h ./zstd/lib/legacy/*.h' +dict_globs = [ + 'programs/*.c', + 'lib/common/*.c', + 'lib/compress/*.c', + 'lib/decompress/*.c', + 'lib/dictBuilder/*.c', + 'lib/legacy/*.c', + 'programs/*.h', + 'lib/common/*.h', + 'lib/compress/*.h', + 'lib/dictBuilder/*.h', + 'lib/legacy/*.h' +] def execute(command, print_output=False, print_error=True, param_shell=False): @@ -74,59 +85,85 @@ def get_git_tags(): return tags -def create_dict(tag, dict_source_path): +def dict_ok(tag, dict_name, sample): + if not os.path.isfile(dict_name): + return False + try: + cmd = ['./zstd.' + tag, '-D', dict_name] + with open(sample, "rb") as i: + subprocess.check_call(cmd, stdin=i, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + return True + except: + return False + + +def create_dict(tag, dict_source_path, fallback_tag=None): dict_name = 'dict.' + tag if not os.path.isfile(dict_name): cFiles = glob.glob(dict_source_path + "/*.c") hFiles = glob.glob(dict_source_path + "/*.h") + # Ensure the dictionary builder is deterministic + files = sorted(cFiles + hFiles) if tag == 'v0.5.0': - result = execute('./dictBuilder.' + tag + ' ' + ' '.join(cFiles) + ' ' + ' '.join(hFiles) + ' -o ' + dict_name, print_output=False, param_shell=True) + result = execute('./dictBuilder.' + tag + ' ' + ' '.join(files) + ' -o ' + dict_name, print_output=False, param_shell=True) else: - result = execute('./zstd.' + tag + ' -f --train ' + ' '.join(cFiles) + ' ' + ' '.join(hFiles) + ' -o ' + dict_name, print_output=False, param_shell=True) - if result == 0: + result = execute('./zstd.' + tag + ' -f --train ' + ' '.join(files) + ' -o ' + dict_name, print_output=False, param_shell=True) + if result == 0 and dict_ok(tag, dict_name, files[0]): print(dict_name + ' created') + elif fallback_tag is not None: + fallback_dict_name = 'dict.' + fallback_tag + print('creating dictionary ' + dict_name + ' failed, falling back to ' + fallback_dict_name) + shutil.copy(fallback_dict_name, dict_name) else: - print('ERROR: creating of ' + dict_name + ' failed') + raise RuntimeError('ERROR: creating of ' + dict_name + ' failed') else: print(dict_name + ' already exists') +def zstd(tag, args, input_file, output_file): + """ + Zstd compress input_file to output_file. + Need this helper because 0.5.0 is broken when stdout is not a TTY. + Throws an exception if the command returns non-zero. + """ + with open(input_file, "rb") as i: + with open(output_file, "wb") as o: + cmd = ['./zstd.' + tag] + args + print("Running: '{}', input={}, output={}" .format( + ' '.join(cmd), input_file, output_file + )) + result = subprocess.run(cmd, stdin=i, stdout=o, stderr=subprocess.PIPE) + print("Stderr: {}".format(result.stderr.decode("ascii"))) + result.check_returncode() + + def dict_compress_sample(tag, sample): dict_name = 'dict.' + tag - DEVNULL = open(os.devnull, 'wb') - if subprocess.call(['./zstd.' 
+ tag, '-D', dict_name, '-f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_01_64_' + tag + '_dictio.zst') - if subprocess.call(['./zstd.' + tag, '-D', dict_name, '-5f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_05_64_' + tag + '_dictio.zst') - if subprocess.call(['./zstd.' + tag, '-D', dict_name, '-9f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_09_64_' + tag + '_dictio.zst') - if subprocess.call(['./zstd.' + tag, '-D', dict_name, '-15f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_15_64_' + tag + '_dictio.zst') - if subprocess.call(['./zstd.' + tag, '-D', dict_name, '-18f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_18_64_' + tag + '_dictio.zst') + verbose = ['-v', '-v', '-v'] + zstd(tag, ['-D', dict_name, '-1'] + verbose, sample, sample + '_01_64_' + tag + '_dictio.zst') + zstd(tag, ['-D', dict_name, '-3'], sample, sample + '_03_64_' + tag + '_dictio.zst') + zstd(tag, ['-D', dict_name, '-5'], sample, sample + '_05_64_' + tag + '_dictio.zst') + zstd(tag, ['-D', dict_name, '-9'], sample, sample + '_09_64_' + tag + '_dictio.zst') + zstd(tag, ['-D', dict_name, '-15'], sample, sample + '_15_64_' + tag + '_dictio.zst') + zstd(tag, ['-D', dict_name, '-18'], sample, sample + '_18_64_' + tag + '_dictio.zst') # zstdFiles = glob.glob("*.zst*") # print(zstdFiles) print(tag + " : dict compression completed") def compress_sample(tag, sample): - DEVNULL = open(os.devnull, 'wb') - if subprocess.call(['./zstd.' + tag, '-f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_01_64_' + tag + '_nodict.zst') - if subprocess.call(['./zstd.' + tag, '-5f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_05_64_' + tag + '_nodict.zst') - if subprocess.call(['./zstd.' + tag, '-9f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_09_64_' + tag + '_nodict.zst') - if subprocess.call(['./zstd.' + tag, '-15f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_15_64_' + tag + '_nodict.zst') - if subprocess.call(['./zstd.' + tag, '-18f', sample], stderr=DEVNULL) == 0: - os.rename(sample + '.zst', sample + '_18_64_' + tag + '_nodict.zst') + zstd(tag, ['-1'], sample, sample + '_01_64_' + tag + '_nodict.zst') + zstd(tag, ['-3'], sample, sample + '_03_64_' + tag + '_nodict.zst') + zstd(tag, ['-5'], sample, sample + '_05_64_' + tag + '_nodict.zst') + zstd(tag, ['-9'], sample, sample + '_09_64_' + tag + '_nodict.zst') + zstd(tag, ['-15'], sample, sample + '_15_64_' + tag + '_nodict.zst') + zstd(tag, ['-18'], sample, sample + '_18_64_' + tag + '_nodict.zst') # zstdFiles = glob.glob("*.zst*") # print(zstdFiles) print(tag + " : compression completed") -# http://stackoverflow.com/a/19711609/2132223 +# https://stackoverflow.com/a/19711609/2132223 def sha1_of_file(filepath): with open(filepath, 'rb') as f: return hashlib.sha1(f.read()).hexdigest() @@ -150,23 +187,13 @@ def decompress_zst(tag): dec_error = 0 list_zst = sorted(glob.glob('*_nodict.zst')) for file_zst in list_zst: - print(file_zst, end=' ') - print(tag, end=' ') + print(file_zst + ' ' + tag) file_dec = file_zst + '_d64_' + tag + '.dec' - if tag <= 'v0.5.0': - params = ['./zstd.' + tag, '-df', file_zst, file_dec] - else: - params = ['./zstd.' + tag, '-df', file_zst, '-o', file_dec] - if execute(params) == 0: - if not filecmp.cmp(file_dec, test_dat): - print('ERR !! 
') - dec_error = 1 - else: - print('OK ') + zstd(tag, ['-d'], file_zst, file_dec) + if not filecmp.cmp(file_dec, test_dat): + raise RuntimeError('Decompression failed: tag={} file={}'.format(tag, file_zst)) else: - print('command does not work') - dec_error = 1 - return dec_error + print('OK ') def decompress_dict(tag): @@ -181,22 +208,13 @@ def decompress_dict(tag): if tag == 'v0.6.0' and dict_tag < 'v0.6.0': continue dict_name = 'dict.' + dict_tag - print(file_zst + ' ' + tag + ' dict=' + dict_tag, end=' ') + print(file_zst + ' ' + tag + ' dict=' + dict_tag) file_dec = file_zst + '_d64_' + tag + '.dec' - if tag <= 'v0.5.0': - params = ['./zstd.' + tag, '-D', dict_name, '-df', file_zst, file_dec] - else: - params = ['./zstd.' + tag, '-D', dict_name, '-df', file_zst, '-o', file_dec] - if execute(params) == 0: - if not filecmp.cmp(file_dec, test_dat): - print('ERR !! ') - dec_error = 1 - else: - print('OK ') + zstd(tag, ['-D', dict_name, '-d'], file_zst, file_dec) + if not filecmp.cmp(file_dec, test_dat): + raise RuntimeError('Decompression failed: tag={} file={}'.format(tag, file_zst)) else: - print('command does not work') - dec_error = 1 - return dec_error + print('OK ') if __name__ == '__main__': @@ -260,32 +278,31 @@ def decompress_dict(tag): # copy *.c and *.h to a temporary directory ("dict_source") if not os.path.isdir(dict_source_path): os.mkdir(dict_source_path) - print('cp ' + dict_files + ' ' + dict_source_path) - execute('cp ' + dict_files + ' ' + dict_source_path, param_shell=True) + for dict_glob in dict_globs: + files = glob.glob(dict_glob, root_dir=base_dir) + for file in files: + file = os.path.join(base_dir, file) + print("copying " + file + " to " + dict_source_path) + shutil.copy(file, dict_source_path) print('-----------------------------------------------') print('Compress test.dat by all released zstd') print('-----------------------------------------------') - error_code = 0 + create_dict(head, dict_source_path) for tag in tags: print(tag) if tag >= 'v0.5.0': - create_dict(tag, dict_source_path) + create_dict(tag, dict_source_path, head) dict_compress_sample(tag, test_dat) remove_duplicates() - error_code += decompress_dict(tag) + decompress_dict(tag) compress_sample(tag, test_dat) remove_duplicates() - error_code += decompress_zst(tag) + decompress_zst(tag) print('') print('Enumerate different compressed files') zstds = sorted(glob.glob('*.zst')) for zstd in zstds: print(zstd + ' : ' + repr(os.path.getsize(zstd)) + ', ' + sha1_of_file(zstd)) - - if error_code != 0: - print('====== ERROR !!! =======') - - sys.exit(error_code) diff --git a/tests/zstreamtest.c b/tests/zstreamtest.c index 72fd72ea368..14c4af82fb7 100644 --- a/tests/zstreamtest.c +++ b/tests/zstreamtest.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -25,6 +25,7 @@ #include /* free */ #include /* fgets, sscanf */ #include /* strcmp */ +#include /* time_t, time(), to randomize seed */ #include /* assert */ #include "timefn.h" /* UTIL_time_t, UTIL_getTime */ #include "mem.h" @@ -39,7 +40,7 @@ #include "seqgen.h" #include "util.h" #include "timefn.h" /* UTIL_time_t, UTIL_clockSpanMicro, UTIL_getTime */ - +#include "external_matchfinder.h" /* zstreamSequenceProducer, EMF_testCase */ /*-************************************ * Constants @@ -259,7 +260,7 @@ static U32 badParameters(ZSTD_CCtx* zc, ZSTD_parameters const savedParams) return 0; } -static int basicUnitTests(U32 seed, double compressibility) +static int basicUnitTests(U32 seed, double compressibility, int bigTests) { size_t const CNBufferSize = COMPRESSIBLE_NOISE_LENGTH; void* CNBuffer = malloc(CNBufferSize); @@ -424,6 +425,15 @@ static int basicUnitTests(U32 seed, double compressibility) } } DISPLAYLEVEL(3, "OK \n"); + /* check decompression fails early if first bytes are wrong */ + DISPLAYLEVEL(3, "test%3i : early decompression error if first bytes are incorrect : ", testNb++); + { const char buf[3] = { 0 }; /* too short, not enough to start decoding header */ + ZSTD_inBuffer inb = { buf, sizeof(buf), 0 }; + size_t const remaining = ZSTD_decompressStream(zd, &outBuff, &inb); + if (!ZSTD_isError(remaining)) goto _output_error; /* should have errored out immediately (note: this does not test the exact error code) */ + } + DISPLAYLEVEL(3, "OK \n"); + /* context size functions */ DISPLAYLEVEL(3, "test%3i : estimate DStream size : ", testNb++); { ZSTD_frameHeader fhi; @@ -513,7 +523,7 @@ static int basicUnitTests(U32 seed, double compressibility) } DISPLAYLEVEL(3, "OK \n"); - DISPLAYLEVEL(3, "test%3i : NULL buffers : ", testNb++); + DISPLAYLEVEL(3, "test%3i : NULL output and NULL input : ", testNb++); inBuff.src = NULL; inBuff.size = 0; inBuff.pos = 0; @@ -539,6 +549,36 @@ static int basicUnitTests(U32 seed, double compressibility) { size_t const ret = ZSTD_decompressStream(zd, &outBuff, &inBuff); if (ret != 0) goto _output_error; } + DISPLAYLEVEL(3, "OK\n"); + + DISPLAYLEVEL(3, "test%3i : NULL output buffer with non-NULL input : ", testNb++); + { + const char* test = "aa"; + inBuff.src = test; + inBuff.size = 2; + inBuff.pos = 0; + outBuff.dst = NULL; + outBuff.size = 0; + outBuff.pos = 0; + CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) ); + CHECK(inBuff.pos != inBuff.size, "Entire input should be consumed"); + CHECK_Z( ZSTD_endStream(zc, &outBuff) ); + outBuff.dst = (char*)(compressedBuffer); + outBuff.size = compressedBufferSize; + outBuff.pos = 0; + { size_t const r = ZSTD_endStream(zc, &outBuff); + CHECK(r != 0, "Error or some data not flushed (ret=%zu)", r); + } + inBuff.src = outBuff.dst; + inBuff.size = outBuff.pos; + inBuff.pos = 0; + outBuff.dst = NULL; + outBuff.size = 0; + outBuff.pos = 0; + CHECK_Z( ZSTD_initDStream(zd) ); + CHECK_Z(ZSTD_decompressStream(zd, &outBuff, &inBuff)); + } + DISPLAYLEVEL(3, "OK\n"); /* _srcSize compression test */ DISPLAYLEVEL(3, "test%3i : compress_srcSize %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH); @@ -613,7 +653,7 @@ static int basicUnitTests(U32 seed, double compressibility) DISPLAYLEVEL(3, "OK (error detected : %s) \n", ZSTD_getErrorName(r)); } } - /* Complex context re-use scenario */ + /* Compression state re-use scenario */ DISPLAYLEVEL(3, "test%3i : context re-use : ", testNb++); ZSTD_freeCStream(zc); zc = ZSTD_createCStream(); @@ -634,8 
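
A minimal stand-alone sketch of the behaviour exercised by the new "early decompression error" test above: feeding a few bytes that cannot start a valid zstd frame should make ZSTD_decompressStream() fail immediately instead of asking for more input. Illustrative only, not part of the patch; it assumes a libzstd with this change, a `cc demo.c -lzstd` build, and it omits most error handling.

```c
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    ZSTD_DStream* const dstream = ZSTD_createDStream();
    char dstBuf[64];
    const char badInput[3] = { 0, 0, 0 };   /* cannot be a valid zstd magic number */
    ZSTD_inBuffer in = { badInput, sizeof(badInput), 0 };
    ZSTD_outBuffer out = { dstBuf, sizeof(dstBuf), 0 };

    size_t const ret = ZSTD_decompressStream(dstream, &out, &in);
    if (ZSTD_isError(ret)) {
        printf("rejected early, as the new test expects: %s\n", ZSTD_getErrorName(ret));
    } else {
        printf("decoder asked for %zu more bytes instead of erroring\n", ret);
    }
    ZSTD_freeDStream(dstream);
    return 0;
}
```
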
+674,7 @@ static int basicUnitTests(U32 seed, double compressibility) CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) ); if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */ DISPLAYLEVEL(5, "end1 "); - { size_t const r = ZSTD_endStream(zc, &outBuff); - if (r != 0) goto _output_error; } /* error, or some data not flushed */ + if (ZSTD_endStream(zc, &outBuff) != 0) goto _output_error; /* error, or some data not flushed */ } /* use 2 */ { size_t const inSize = 1025; /* will not continue, because tables auto-adjust and are therefore different size */ @@ -653,8 +692,7 @@ static int basicUnitTests(U32 seed, double compressibility) CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) ); if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */ DISPLAYLEVEL(5, "end2 "); - { size_t const r = ZSTD_endStream(zc, &outBuff); - if (r != 0) goto _output_error; } /* error, or some data not flushed */ + if (ZSTD_endStream(zc, &outBuff) != 0) goto _output_error; /* error, or some data not flushed */ } DISPLAYLEVEL(3, "OK \n"); @@ -771,11 +809,12 @@ static int basicUnitTests(U32 seed, double compressibility) } /* Compression with ZSTD_c_stable{In,Out}Buffer */ - { ZSTD_CCtx* cctx = ZSTD_createCCtx(); + { ZSTD_CCtx* const cctx = ZSTD_createCCtx(); ZSTD_inBuffer in; ZSTD_outBuffer out; size_t cctxSize1; size_t cctxSize2; + assert(cctx != NULL); in.src = CNBuffer; in.size = CNBufferSize; out.dst = compressedBuffer; @@ -786,30 +825,33 @@ static int basicUnitTests(U32 seed, double compressibility) CHECK(!(cSize < ZSTD_compressBound(CNBufferSize)), "cSize too large for test"); CHECK_Z(cSize = ZSTD_compress2(cctx, compressedBuffer, cSize + 4, CNBuffer, CNBufferSize)); CHECK_Z(cctxSize1 = ZSTD_sizeof_CCtx(cctx)); - { ZSTD_CCtx* cctx2 = ZSTD_createCCtx(); + /* @cctxSize2 : sizeof_CCtx when doing full streaming (no stable in/out) */ + { ZSTD_CCtx* const cctx2 = ZSTD_createCCtx(); + assert(cctx2 != NULL); in.pos = out.pos = 0; CHECK_Z(ZSTD_compressStream2(cctx2, &out, &in, ZSTD_e_continue)); CHECK(!(ZSTD_compressStream2(cctx2, &out, &in, ZSTD_e_end) == 0), "Not finished"); CHECK_Z(cctxSize2 = ZSTD_sizeof_CCtx(cctx2)); ZSTD_freeCCtx(cctx2); } - { ZSTD_CCtx* cctx3 = ZSTD_createCCtx(); + /* @cctxSize1 : sizeof_CCtx when doing single-shot compression (no streaming) */ + { ZSTD_CCtx* const cctx1 = ZSTD_createCCtx(); ZSTD_parameters params = ZSTD_getParams(0, CNBufferSize, 0); size_t cSize3; + assert(cctx1 != NULL); params.fParams.checksumFlag = 1; - cSize3 = ZSTD_compress_advanced(cctx3, compressedBuffer, compressedBufferSize, CNBuffer, CNBufferSize, NULL, 0, params); + cSize3 = ZSTD_compress_advanced(cctx1, compressedBuffer, compressedBufferSize, CNBuffer, CNBufferSize, NULL, 0, params); CHECK_Z(cSize3); CHECK(!(cSize == cSize3), "Must be same compressed size"); - CHECK(!(cctxSize1 == ZSTD_sizeof_CCtx(cctx3)), "Must be same CCtx size"); - ZSTD_freeCCtx(cctx3); + CHECK(!(cctxSize1 == ZSTD_sizeof_CCtx(cctx1)), "Must be same CCtx size"); + ZSTD_freeCCtx(cctx1); } CHECK(!(cctxSize1 < cctxSize2), "Stable buffers means less allocated size"); CHECK_Z(ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize)); DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : ZSTD_compress2() doesn't modify user parameters : ", testNb++); - { - int stableInBuffer; + { int stableInBuffer; int stableOutBuffer; CHECK_Z(ZSTD_CCtx_getParameter(cctx, ZSTD_c_stableInBuffer, &stableInBuffer)); CHECK_Z(ZSTD_CCtx_getParameter(cctx, ZSTD_c_stableOutBuffer, 
&stableOutBuffer)); @@ -866,30 +908,73 @@ static int basicUnitTests(U32 seed, double compressibility) in.pos = 0; { size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end); CHECK(!ZSTD_isError(ret), "Must error"); - CHECK(!(ZSTD_getErrorCode(ret) == ZSTD_error_srcBuffer_wrong), "Must be this error"); + CHECK(!(ZSTD_getErrorCode(ret) == ZSTD_error_stabilityCondition_notRespected), "Must be this error"); } DISPLAYLEVEL(3, "OK \n"); - DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() ZSTD_c_stableInBuffer with continue and flush : ", testNb++); - in.src = CNBuffer; - in.size = CNBufferSize; - in.pos = 0; - out.pos = 0; - out.size = compressedBufferSize; - CHECK_Z(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only)); - { size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue); - CHECK(!ZSTD_isError(ret), "Must error"); - CHECK(!(ZSTD_getErrorCode(ret) == ZSTD_error_srcBuffer_wrong), "Must be this error"); - } - CHECK_Z(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only)); - { size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush); - CHECK(!ZSTD_isError(ret), "Must error"); - CHECK(!(ZSTD_getErrorCode(ret) == ZSTD_error_srcBuffer_wrong), "Must be this error"); - } + /* stableSrc + streaming */ + DISPLAYLEVEL(3, "test%3i : ZSTD_c_stableInBuffer compatibility with compressStream, flushStream and endStream : ", testNb++); + CHECK_Z( ZSTD_initCStream(cctx, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1) ); + { ZSTD_inBuffer inBuf; + ZSTD_outBuffer outBuf; + const size_t nonZeroStartPos = 18; + const size_t inputSize = 500; + inBuf.src = CNBuffer; + inBuf.size = 100; + inBuf.pos = nonZeroStartPos; + outBuf.dst = (char*)(compressedBuffer)+cSize; + outBuf.size = ZSTD_compressBound(inputSize); + outBuf.pos = 0; + CHECK_Z( ZSTD_compressStream(cctx, &outBuf, &inBuf) ); + inBuf.size = 200; + CHECK_Z( ZSTD_compressStream(cctx, &outBuf, &inBuf) ); + CHECK_Z( ZSTD_flushStream(cctx, &outBuf) ); + inBuf.size = nonZeroStartPos + inputSize; + CHECK_Z( ZSTD_compressStream(cctx, &outBuf, &inBuf) ); + CHECK(ZSTD_endStream(cctx, &outBuf) != 0, "compression should be successful and fully flushed"); + { const void* const realSrcStart = (const char*)inBuf.src + nonZeroStartPos; + void* const verifBuf = (char*)outBuf.dst + outBuf.pos; + const size_t decSize = ZSTD_decompress(verifBuf, inputSize, outBuf.dst, outBuf.pos); + CHECK_Z(decSize); + CHECK(decSize != inputSize, "regenerated %zu bytes, instead of %zu", decSize, inputSize); + CHECK(memcmp(realSrcStart, verifBuf, inputSize) != 0, "regenerated data different from original"); + } } DISPLAYLEVEL(3, "OK \n"); - DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() ZSTD_c_stableInBuffer allocated size : ", testNb++); + /* stableSrc + streaming */ + DISPLAYLEVEL(3, "test%3i : ZSTD_c_stableInBuffer compatibility with compressStream2, using different end directives : ", testNb++); + CHECK_Z( ZSTD_initCStream(cctx, 1) ); + CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1) ); + { ZSTD_inBuffer inBuf; + ZSTD_outBuffer outBuf; + const size_t nonZeroStartPos = 18; + const size_t inputSize = 500; + inBuf.src = CNBuffer; + inBuf.size = 100; + inBuf.pos = nonZeroStartPos; + outBuf.dst = (char*)(compressedBuffer)+cSize; + outBuf.size = ZSTD_compressBound(inputSize); + outBuf.pos = 0; + CHECK_Z( ZSTD_compressStream2(cctx, &outBuf, &inBuf, ZSTD_e_continue) ); + inBuf.size = 200; + CHECK_Z( ZSTD_compressStream2(cctx, &outBuf, &inBuf, ZSTD_e_continue) ); + CHECK_Z( ZSTD_compressStream2(cctx, &outBuf, &inBuf, 
ZSTD_e_flush) ); + inBuf.size = nonZeroStartPos + inputSize; + CHECK_Z( ZSTD_compressStream2(cctx, &outBuf, &inBuf, ZSTD_e_continue) ); + CHECK( ZSTD_compressStream2(cctx, &outBuf, &inBuf, ZSTD_e_end) != 0, "compression should be successful and fully flushed"); + { const void* const realSrcStart = (const char*)inBuf.src + nonZeroStartPos; + void* const verifBuf = (char*)outBuf.dst + outBuf.pos; + const size_t decSize = ZSTD_decompress(verifBuf, inputSize, outBuf.dst, outBuf.pos); + CHECK_Z(decSize); + CHECK(decSize != inputSize, "regenerated %zu bytes, instead of %zu", decSize, inputSize); + CHECK(memcmp(realSrcStart, verifBuf, inputSize) != 0, "regenerated data different from original"); + } } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() with ZSTD_c_stableInBuffer: context size : ", testNb++); { size_t const cctxSize = ZSTD_sizeof_CCtx(cctx); + DISPLAYLEVEL(4, "cctxSize1=%zu; cctxSize=%zu; cctxSize2=%zu : ", cctxSize1, cctxSize, cctxSize2); CHECK(!(cctxSize1 < cctxSize), "Must be bigger than single-pass"); CHECK(!(cctxSize < cctxSize2), "Must be smaller than streaming"); cctxSize1 = cctxSize; @@ -900,8 +985,10 @@ static int basicUnitTests(U32 seed, double compressibility) CHECK_Z(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters)); CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)); CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableOutBuffer, 1)); + in.src = CNBuffer; in.pos = out.pos = 0; in.size = MIN(CNBufferSize, 10); + out.size = compressedBufferSize; CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush)); in.pos = 0; in.size = CNBufferSize - in.size; @@ -916,12 +1003,13 @@ static int basicUnitTests(U32 seed, double compressibility) in.pos = out.pos = 0; { size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue); CHECK(!ZSTD_isError(ret), "Must have errored"); - CHECK(!(ZSTD_getErrorCode(ret) == ZSTD_error_dstBuffer_wrong), "Must be this error"); + CHECK(!(ZSTD_getErrorCode(ret) == ZSTD_error_stabilityCondition_notRespected), "Must be this error"); } DISPLAYLEVEL(3, "OK \n"); - DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() ZSTD_c_stableOutBuffer allocated size : ", testNb++); + DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() with ZSTD_c_stableOutBuffer: context size : ", testNb++); { size_t const cctxSize = ZSTD_sizeof_CCtx(cctx); + DISPLAYLEVEL(4, "cctxSize1=%zu; cctxSize=%zu; cctxSize2=%zu : ", cctxSize1, cctxSize, cctxSize2); CHECK(!(cctxSize1 < cctxSize), "Must be bigger than single-pass and stableInBuffer"); CHECK(!(cctxSize < cctxSize2), "Must be smaller than streaming"); } @@ -1478,6 +1566,84 @@ static int basicUnitTests(U32 seed, double compressibility) CHECK(!ZSTD_isError(ZSTD_CCtx_setParameter(zc, ZSTD_c_srcSizeHint, -1)), "Out of range doesn't error"); DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : ZSTD_lazy compress with hashLog = 29 and searchLog = 4 : ", testNb++); + if (MEM_64bits()) { + ZSTD_outBuffer out = { compressedBuffer, compressedBufferSize, 0 }; + ZSTD_inBuffer in = { CNBuffer, CNBufferSize, 0 }; + CHECK_Z(ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_strategy, ZSTD_lazy)); + /* Force enable the row based match finder */ + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_searchLog, 4)); + /* Set windowLog to 29 so the hashLog doesn't get sized down */ + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_windowLog, 29)); + 
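
A minimal sketch of the ZSTD_c_stableInBuffer contract that the two compatibility tests above exercise: the caller promises that in.src stays valid and unmodified until the frame is finished, so the compressor may reference the input in place rather than copy it. Illustrative only, not part of the patch; ZSTD_c_stableInBuffer is an advanced/experimental parameter, so this assumes ZSTD_STATIC_LINKING_ONLY and a recent libzstd, built with `cc demo.c -lzstd`, with error handling kept minimal.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    static const char src[] = "stable input buffer example, stable input buffer example";
    char dst[256];
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_inBuffer in = { src, sizeof(src), 0 };      /* src will not move or change */
    ZSTD_outBuffer out = { dst, sizeof(dst), 0 };
    size_t ret;

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);

    /* With a stable input buffer, the whole input is visible from the first call;
     * repeat ZSTD_e_end until it reports 0 (frame fully flushed). */
    do {
        ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(ret)) { printf("error: %s\n", ZSTD_getErrorName(ret)); break; }
    } while (ret != 0);

    printf("compressed %zu bytes into %zu bytes\n", sizeof(src), out.pos);
    ZSTD_freeCCtx(cctx);
    return 0;
}
```
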
CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_hashLog, 29)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_checksumFlag, 1)); + /* Compress with continue first so the hashLog doesn't get sized down */ + CHECK_Z(ZSTD_compressStream2(zc, &out, &in, ZSTD_e_continue)); + CHECK_Z(ZSTD_compressStream2(zc, &out, &in, ZSTD_e_end)); + cSize = out.pos; + CHECK_Z(ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize)); + } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : Test offset == windowSize : ", testNb++); + { + int windowLog; + int const kMaxWindowLog = bigTests ? 29 : 26; + size_t const kNbSequences = 10000; + size_t const kMaxSrcSize = (1u << kMaxWindowLog) + 10 * kNbSequences; + char* src = calloc(kMaxSrcSize, 1); + ZSTD_Sequence* sequences = malloc(sizeof(ZSTD_Sequence) * kNbSequences); + for (windowLog = ZSTD_WINDOWLOG_MIN; windowLog <= kMaxWindowLog; ++windowLog) { + size_t const srcSize = ((size_t)1 << windowLog) + 10 * (kNbSequences - 1); + + sequences[0].offset = 32; + sequences[0].litLength = 32; + sequences[0].matchLength = (1u << windowLog) - 32; + sequences[0].rep = 0; + { + size_t i; + for (i = 1; i < kNbSequences; ++i) { + sequences[i].offset = (1u << windowLog) - (FUZ_rand(&seed) % 8); + sequences[i].litLength = FUZ_rand(&seed) & 7; + sequences[i].matchLength = 10 - sequences[i].litLength; + sequences[i].rep = 0; + } + } + + CHECK_Z(ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_checksumFlag, 1)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_minMatch, 3)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_validateSequences, 1)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_windowLog, windowLog)); + assert(srcSize <= kMaxSrcSize); + cSize = ZSTD_compressSequences(zc, compressedBuffer, compressedBufferSize, sequences, kNbSequences, src, srcSize); + CHECK_Z(cSize); + CHECK_Z(ZSTD_DCtx_reset(zd, ZSTD_reset_session_and_parameters)); + CHECK_Z(ZSTD_DCtx_setParameter(zd, ZSTD_d_windowLogMax, windowLog)) + { + ZSTD_inBuffer in = {compressedBuffer, cSize, 0}; + size_t decompressedBytes = 0; + for (;;) { + ZSTD_outBuffer out = {decodedBuffer, decodedBufferSize, 0}; + size_t const ret = ZSTD_decompressStream(zd, &out, &in); + CHECK_Z(ret); + CHECK(decompressedBytes + out.pos > srcSize, "Output too large"); + CHECK(memcmp(out.dst, src + decompressedBytes, out.pos), "Corrupted"); + decompressedBytes += out.pos; + if (ret == 0) { + break; + } + } + CHECK(decompressedBytes != srcSize, "Output wrong size"); + } + } + free(sequences); + free(src); + } + DISPLAYLEVEL(3, "OK \n"); + /* Overlen overwriting window data bug */ DISPLAYLEVEL(3, "test%3i : wildcopy doesn't overwrite potential match data : ", testNb++); { /* This test has a window size of 1024 bytes and consists of 3 blocks: @@ -1690,6 +1856,445 @@ static int basicUnitTests(U32 seed, double compressibility) } DISPLAYLEVEL(3, "OK \n"); + DISPLAYLEVEL(3, "test%3i : Block-Level External Sequence Producer API: ", testNb++); + { + size_t const dstBufSize = ZSTD_compressBound(CNBufferSize); + BYTE* const dstBuf = (BYTE*)malloc(ZSTD_compressBound(dstBufSize)); + size_t const checkBufSize = CNBufferSize; + BYTE* const checkBuf = (BYTE*)malloc(checkBufSize); + int enableFallback; + EMF_testCase sequenceProducerState; + + CHECK(dstBuf == NULL || checkBuf == NULL, "allocation failed"); + + CHECK_Z(ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters)); + + /* Reference external matchfinder outside the test loop to + * check that the reference is preserved across compressions */ + 
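
A minimal sketch of forcing the row-based match finder the way the new ZSTD_lazy unit test above does, without the extreme hashLog/windowLog settings. Illustrative only, not part of the patch; ZSTD_c_useRowMatchFinder and ZSTD_ps_enable are experimental names (ZSTD_STATIC_LINKING_ONLY, recent libzstd assumed), and the build line `cc demo.c -lzstd` plus the minimal error handling are assumptions.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    size_t const srcSize = 1 << 16;                  /* 64 KB of trivially compressible data */
    char* const src = malloc(srcSize);
    size_t const dstCap = ZSTD_compressBound(srcSize);
    char* const dst = malloc(dstCap);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;

    memset(src, 'z', srcSize);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, ZSTD_lazy);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable);  /* force it on */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, 4);

    cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    if (ZSTD_isError(cSize)) printf("error: %s\n", ZSTD_getErrorName(cSize));
    else printf("row match finder: %zu -> %zu bytes\n", srcSize, cSize);

    ZSTD_freeCCtx(cctx);
    free(src); free(dst);
    return 0;
}
```
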
ZSTD_registerSequenceProducer(zc, &sequenceProducerState, zstreamSequenceProducer); + + for (enableFallback = 0; enableFallback <= 1; enableFallback++) { + size_t testCaseId; + size_t const numTestCases = 9; + + EMF_testCase const testCases[] = { + EMF_ONE_BIG_SEQ, + EMF_LOTS_OF_SEQS, + EMF_ZERO_SEQS, + EMF_BIG_ERROR, + EMF_SMALL_ERROR, + EMF_INVALID_OFFSET, + EMF_INVALID_MATCHLEN, + EMF_INVALID_LITLEN, + EMF_INVALID_LAST_LITS + }; + + ZSTD_ErrorCode const errorCodes[] = { + ZSTD_error_no_error, + ZSTD_error_no_error, + ZSTD_error_sequenceProducer_failed, + ZSTD_error_sequenceProducer_failed, + ZSTD_error_sequenceProducer_failed, + ZSTD_error_externalSequences_invalid, + ZSTD_error_externalSequences_invalid, + ZSTD_error_externalSequences_invalid, + ZSTD_error_externalSequences_invalid + }; + + for (testCaseId = 0; testCaseId < numTestCases; testCaseId++) { + size_t res; + + int const compressionShouldSucceed = ( + (errorCodes[testCaseId] == ZSTD_error_no_error) || + (enableFallback && errorCodes[testCaseId] == ZSTD_error_sequenceProducer_failed) + ); + + int const testWithSequenceValidation = ( + testCases[testCaseId] == EMF_INVALID_OFFSET + ); + + sequenceProducerState = testCases[testCaseId]; + + ZSTD_CCtx_reset(zc, ZSTD_reset_session_only); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_validateSequences, testWithSequenceValidation)); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, enableFallback)); + res = ZSTD_compress2(zc, dstBuf, dstBufSize, CNBuffer, CNBufferSize); + + if (compressionShouldSucceed) { + CHECK(ZSTD_isError(res), "EMF: Compression error: %s", ZSTD_getErrorName(res)); + CHECK_Z(ZSTD_decompress(checkBuf, checkBufSize, dstBuf, res)); + CHECK(memcmp(CNBuffer, checkBuf, CNBufferSize) != 0, "EMF: Corruption!"); + } else { + CHECK(!ZSTD_isError(res), "EMF: Should have raised an error!"); + CHECK( + ZSTD_getErrorCode(res) != errorCodes[testCaseId], + "EMF: Wrong error code: %s", ZSTD_getErrorName(res) + ); + } + } + + /* Test compression with external matchfinder + empty src buffer */ + { + size_t res; + sequenceProducerState = EMF_ZERO_SEQS; + ZSTD_CCtx_reset(zc, ZSTD_reset_session_only); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, enableFallback)); + res = ZSTD_compress2(zc, dstBuf, dstBufSize, CNBuffer, 0); + CHECK(ZSTD_isError(res), "EMF: Compression error: %s", ZSTD_getErrorName(res)); + CHECK(ZSTD_decompress(checkBuf, checkBufSize, dstBuf, res) != 0, "EMF: Empty src round trip failed!"); + } + } + + /* Test that reset clears the external matchfinder */ + CHECK_Z(ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters)); + sequenceProducerState = EMF_BIG_ERROR; /* ensure zstd will fail if the matchfinder wasn't cleared */ + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, 0)); + CHECK_Z(ZSTD_compress2(zc, dstBuf, dstBufSize, CNBuffer, CNBufferSize)); + + /* Test that registering mFinder == NULL clears the external matchfinder */ + ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters); + ZSTD_registerSequenceProducer(zc, &sequenceProducerState, zstreamSequenceProducer); + sequenceProducerState = EMF_BIG_ERROR; /* ensure zstd will fail if the matchfinder wasn't cleared */ + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, 0)); + ZSTD_registerSequenceProducer(zc, NULL, NULL); /* clear the external matchfinder */ + CHECK_Z(ZSTD_compress2(zc, dstBuf, dstBufSize, CNBuffer, CNBufferSize)); + + /* Test that external matchfinder doesn't interact with older APIs */ + ZSTD_CCtx_reset(zc, 
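
A minimal sketch of registering a block-level external sequence producer, which the tests above drive through zstreamSequenceProducer. The producer below simply reports failure, and ZSTD_c_enableSeqProducerFallback lets compression fall back to the internal match finder. Illustrative only, not part of the patch; the callback signature and the ZSTD_SEQUENCE_PRODUCER_ERROR sentinel are assumed from the experimental header (ZSTD_STATIC_LINKING_ONLY, zstd >= v1.5.4) and should be checked against the zstd.h actually in use.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

/* A do-nothing producer: report failure and rely on the fallback path. */
static size_t dummyProducer(void* state,
                            ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                            const void* src, size_t srcSize,
                            const void* dict, size_t dictSize,
                            int compressionLevel, size_t windowSize)
{
    (void)state; (void)outSeqs; (void)outSeqsCapacity; (void)src; (void)srcSize;
    (void)dict; (void)dictSize; (void)compressionLevel; (void)windowSize;
    return ZSTD_SEQUENCE_PRODUCER_ERROR;   /* assumed sentinel: "no sequences produced" */
}

int main(void)
{
    static const char src[] = "external sequence producer fallback example";
    char dst[256];
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;

    ZSTD_registerSequenceProducer(cctx, NULL, dummyProducer);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);

    cSize = ZSTD_compress2(cctx, dst, sizeof(dst), src, sizeof(src));
    if (ZSTD_isError(cSize)) printf("error: %s\n", ZSTD_getErrorName(cSize));
    else printf("compressed via fallback: %zu -> %zu bytes\n", sizeof(src), cSize);

    ZSTD_freeCCtx(cctx);
    return 0;
}
```
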
ZSTD_reset_session_and_parameters); + ZSTD_registerSequenceProducer(zc, &sequenceProducerState, zstreamSequenceProducer); + sequenceProducerState = EMF_BIG_ERROR; /* ensure zstd will fail if the matchfinder is used */ + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, 0)); + CHECK_Z(ZSTD_compressCCtx(zc, dstBuf, dstBufSize, CNBuffer, CNBufferSize, 3)); + + /* Test that compression returns the correct error with LDM */ + CHECK_Z(ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters)); + { + size_t res; + ZSTD_registerSequenceProducer(zc, &sequenceProducerState, zstreamSequenceProducer); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_enableLongDistanceMatching, ZSTD_ps_enable)); + res = ZSTD_compress2(zc, dstBuf, dstBufSize, CNBuffer, CNBufferSize); + CHECK(!ZSTD_isError(res), "EMF: Should have raised an error!"); + CHECK( + ZSTD_getErrorCode(res) != ZSTD_error_parameter_combination_unsupported, + "EMF: Wrong error code: %s", ZSTD_getErrorName(res) + ); + } + +#ifdef ZSTD_MULTITHREAD + /* Test that compression returns the correct error with nbWorkers > 0 */ + CHECK_Z(ZSTD_CCtx_reset(zc, ZSTD_reset_session_and_parameters)); + { + size_t res; + ZSTD_registerSequenceProducer(zc, &sequenceProducerState, zstreamSequenceProducer); + CHECK_Z(ZSTD_CCtx_setParameter(zc, ZSTD_c_nbWorkers, 1)); + res = ZSTD_compress2(zc, dstBuf, dstBufSize, CNBuffer, CNBufferSize); + CHECK(!ZSTD_isError(res), "EMF: Should have raised an error!"); + CHECK( + ZSTD_getErrorCode(res) != ZSTD_error_parameter_combination_unsupported, + "EMF: Wrong error code: %s", ZSTD_getErrorName(res) + ); + } +#endif + + free(dstBuf); + free(checkBuf); + } + DISPLAYLEVEL(3, "OK \n"); + + + /* Test maxBlockSize cctx param functionality */ + DISPLAYLEVEL(3, "test%3i : Testing maxBlockSize PR#3418: ", testNb++); + { + ZSTD_CCtx* cctx = ZSTD_createCCtx(); + + /* Quick test to make sure maxBlockSize bounds are enforced */ + assert(ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, ZSTD_BLOCKSIZE_MAX_MIN - 1))); + assert(ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, ZSTD_BLOCKSIZE_MAX + 1))); + + /* Test maxBlockSize < windowSize and windowSize < maxBlockSize*/ + { + size_t srcSize = 2 << 10; + void* const src = CNBuffer; + size_t dstSize = ZSTD_compressBound(srcSize); + void* const dst1 = compressedBuffer; + void* const dst2 = (BYTE*)compressedBuffer + dstSize; + size_t size1, size2; + void* const checkBuf = malloc(srcSize); + memset(src, 'x', srcSize); + + /* maxBlockSize = 1KB */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 1u << 10)); + size1 = ZSTD_compress2(cctx, dst1, dstSize, src, srcSize); + + if (ZSTD_isError(size1)) goto _output_error; + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst1, size1)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + /* maxBlockSize = 3KB */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 3u << 10)); + size2 = ZSTD_compress2(cctx, dst2, dstSize, src, srcSize); + + if (ZSTD_isError(size2)) goto _output_error; + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst2, size2)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + assert(size1 - size2 == 4); /* We add another RLE block with header + character */ + assert(memcmp(dst1, dst2, size2) != 0); /* Compressed output should not be equal */ + + /* maxBlockSize = 1KB, windowLog = 10 */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 1u << 10)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 10)); + size1 = ZSTD_compress2(cctx, dst1, dstSize, 
src, srcSize); + + if (ZSTD_isError(size1)) goto _output_error; + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst1, size1)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + /* maxBlockSize = 3KB, windowLog = 10 */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 3u << 10)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 10)); + size2 = ZSTD_compress2(cctx, dst2, dstSize, src, srcSize); + + if (ZSTD_isError(size2)) goto _output_error; + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst2, size2)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + assert(size1 == size2); + assert(memcmp(dst1, dst2, size1) == 0); /* Compressed output should be equal */ + + free(checkBuf); + } + + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + + /* Test maxBlockSize = 0 is valid */ + { size_t srcSize = 256 << 10; + void* const src = CNBuffer; + size_t dstSize = ZSTD_compressBound(srcSize); + void* const dst1 = compressedBuffer; + void* const dst2 = (BYTE*)compressedBuffer + dstSize; + size_t size1, size2; + void* const checkBuf = malloc(srcSize); + + /* maxBlockSize = 0 */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 0)); + size1 = ZSTD_compress2(cctx, dst1, dstSize, src, srcSize); + + if (ZSTD_isError(size1)) goto _output_error; + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst1, size1)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + /* maxBlockSize = ZSTD_BLOCKSIZE_MAX */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, ZSTD_BLOCKSIZE_MAX)); + size2 = ZSTD_compress2(cctx, dst2, dstSize, src, srcSize); + + if (ZSTD_isError(size2)) goto _output_error; + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst2, size2)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + assert(size1 == size2); + assert(memcmp(dst1, dst2, size1) == 0); /* Compressed output should be equal */ + free(checkBuf); + } + ZSTD_freeCCtx(cctx); + } + DISPLAYLEVEL(3, "OK \n"); + + /* Test Sequence Validation */ + DISPLAYLEVEL(3, "test%3i : Testing sequence validation: ", testNb++); + { + ZSTD_CCtx* cctx = ZSTD_createCCtx(); + + /* Test minMatch >= 4, matchLength < 4 */ + { + size_t srcSize = 11; + void* const src = CNBuffer; + size_t dstSize = ZSTD_compressBound(srcSize); + void* const dst = compressedBuffer; + size_t const kNbSequences = 4; + ZSTD_Sequence* sequences = malloc(sizeof(ZSTD_Sequence) * kNbSequences); + + memset(src, 'x', srcSize); + + sequences[0] = (ZSTD_Sequence) {1, 1, 3, 0}; + sequences[1] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[2] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[3] = (ZSTD_Sequence) {0, 1, 0, 0}; + + /* Test with sequence validation */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, 5)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1)); + + cSize = ZSTD_compressSequences(cctx, dst, dstSize, + sequences, kNbSequences, + src, srcSize); + + CHECK(!ZSTD_isError(cSize), "Should throw an error"); /* maxNbSeq is too small and an assert will fail */ + CHECK(ZSTD_getErrorCode(cSize) != ZSTD_error_externalSequences_invalid, "Wrong error code: %s", ZSTD_getErrorName(cSize)); /* fails sequence validation */ + + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + + /* Test without sequence validation */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, 5)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters)); + 
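
A minimal sketch of the ZSTD_c_maxBlockSize parameter exercised by the PR#3418 test above: capping blocks at 1 KB splits the frame into more, smaller blocks, and the round trip must still reproduce the input exactly. Illustrative only, not part of the patch; ZSTD_c_maxBlockSize is experimental, so ZSTD_STATIC_LINKING_ONLY and a libzstd that defines it are assumed, as is the `cc demo.c -lzstd` build and the terse error handling.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    size_t const srcSize = 8 << 10;                  /* 8 KB of repeated data */
    char* const src = malloc(srcSize);
    size_t const dstCap = ZSTD_compressBound(srcSize);
    char* const dst = malloc(dstCap);
    char* const check = malloc(srcSize);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize, dSize;

    memset(src, 'x', srcSize);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 1u << 10);   /* 1 KB blocks */

    cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    dSize = ZSTD_decompress(check, srcSize, dst, cSize);
    if (ZSTD_isError(cSize) || ZSTD_isError(dSize) || memcmp(src, check, srcSize) != 0)
        printf("round trip failed\n");
    else
        printf("%zu bytes -> %zu bytes with 1 KB blocks\n", srcSize, cSize);

    ZSTD_freeCCtx(cctx);
    free(src); free(dst); free(check);
    return 0;
}
```
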
CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 0)); + + cSize = ZSTD_compressSequences(cctx, dst, dstSize, + sequences, kNbSequences, + src, srcSize); + + CHECK(!ZSTD_isError(cSize), "Should throw an error"); /* maxNbSeq is too small and an assert will fail */ + CHECK(ZSTD_getErrorCode(cSize) != ZSTD_error_externalSequences_invalid, "Wrong error code: %s", ZSTD_getErrorName(cSize)); /* fails sequence validation */ + + free(sequences); + } + + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + + + /* Test with no block delim */ + { + size_t srcSize = 4; + void* const src = CNBuffer; + size_t dstSize = ZSTD_compressBound(srcSize); + void* const dst = compressedBuffer; + size_t const kNbSequences = 1; + ZSTD_Sequence* sequences = malloc(sizeof(ZSTD_Sequence) * kNbSequences); + void* const checkBuf = malloc(srcSize); + + memset(src, 'x', srcSize); + + sequences[0] = (ZSTD_Sequence) {1, 1, 3, 0}; + + /* Test with sequence validation */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, 3)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1)); + + cSize = ZSTD_compressSequences(cctx, dst, dstSize, + sequences, kNbSequences, + src, srcSize); + + CHECK(ZSTD_isError(cSize), "Should not throw an error"); + CHECK_Z(ZSTD_decompress(checkBuf, srcSize, dst, cSize)); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + + free(sequences); + free(checkBuf); + } + + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + + { /* Test case with two additional sequences */ + size_t srcSize = 19; + void* const src = CNBuffer; + size_t dstSize = ZSTD_compressBound(srcSize); + void* const dst = compressedBuffer; + size_t const kNbSequences = 7; + ZSTD_Sequence* sequences = malloc(sizeof(ZSTD_Sequence) * kNbSequences); + + memset(src, 'x', srcSize); + + sequences[0] = (ZSTD_Sequence) {1, 1, 3, 0}; + sequences[1] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[2] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[3] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[4] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[5] = (ZSTD_Sequence) {1, 0, 3, 0}; + sequences[6] = (ZSTD_Sequence) {0, 0, 0, 0}; + + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, 5)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1)); + + cSize = ZSTD_compressSequences(cctx, dst, dstSize, + sequences, kNbSequences, + src, srcSize); + + CHECK(!ZSTD_isError(cSize), "Should throw an error"); /* maxNbSeq is too small and an assert will fail */ + CHECK(ZSTD_getErrorCode(cSize) != ZSTD_error_externalSequences_invalid, "Wrong error code: %s", ZSTD_getErrorName(cSize)); /* fails sequence validation */ + + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); + + /* Test without sequence validation */ + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, 5)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 0)); + + cSize = ZSTD_compressSequences(cctx, dst, dstSize, + sequences, kNbSequences, + src, srcSize); + + CHECK(!ZSTD_isError(cSize), "Should throw an error"); /* maxNbSeq is too small and an assert will fail */ + CHECK(ZSTD_getErrorCode(cSize) != ZSTD_error_externalSequences_invalid, "Wrong error code: %s", ZSTD_getErrorName(cSize)); /* fails sequence validation */ + + 
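
A minimal valid use of ZSTD_compressSequences(), following the same pattern as the "no block delim" case in the validation tests above: one literal byte followed by a match of length srcSize-1 at offset 1 describes a run of identical bytes, so sequence validation passes and the frame round-trips. Illustrative only, not part of the patch; ZSTD_compressSequences and ZSTD_Sequence are experimental (ZSTD_STATIC_LINKING_ONLY assumed), and the exact validation rules are the library's.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    char src[16];
    char dst[128];
    char check[sizeof(src)];
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_Sequence seq;
    size_t cSize, dSize;

    memset(src, 'x', sizeof(src));
    seq.offset = 1;                       /* copy the previous byte repeatedly */
    seq.litLength = 1;                    /* one literal 'x' ... */
    seq.matchLength = sizeof(src) - 1;    /* ... then 15 matched bytes: 1 + 15 == srcSize */
    seq.rep = 0;

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);

    cSize = ZSTD_compressSequences(cctx, dst, sizeof(dst), &seq, 1, src, sizeof(src));
    if (ZSTD_isError(cSize)) {
        printf("compress error: %s\n", ZSTD_getErrorName(cSize));
    } else {
        dSize = ZSTD_decompress(check, sizeof(check), dst, cSize);
        if (ZSTD_isError(dSize) || memcmp(src, check, sizeof(src)) != 0)
            printf("round trip failed\n");
        else
            printf("round trip OK: %zu -> %zu bytes\n", sizeof(src), cSize);
    }

    ZSTD_freeCCtx(cctx);
    return 0;
}
```
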
free(sequences); + } + ZSTD_freeCCtx(cctx); + } + DISPLAYLEVEL(3, "OK \n"); + + + DISPLAYLEVEL(3, "test%3i : Testing large offset with small window size: ", testNb++); + { + ZSTD_CCtx* cctx = ZSTD_createCCtx(); + ZSTD_DCtx* dctx = ZSTD_createDCtx(); + + /* Test large offset, small window size*/ + { + size_t srcSize = 21; + void* const src = CNBuffer; + size_t dstSize = ZSTD_compressBound(srcSize); + void* const dst = compressedBuffer; + size_t const kNbSequences = 4; + ZSTD_Sequence* sequences = malloc(sizeof(ZSTD_Sequence) * kNbSequences); + void* const checkBuf = malloc(srcSize); + const size_t largeDictSize = 1 << 25; + ZSTD_CDict* cdict = NULL; + ZSTD_DDict* ddict = NULL; + + /* Generate large dictionary */ + void* dictBuffer = calloc(largeDictSize, 1); + ZSTD_compressionParameters cParams = ZSTD_getCParams(1, srcSize, largeDictSize); + cParams.minMatch = ZSTD_MINMATCH_MIN; + cParams.hashLog = ZSTD_HASHLOG_MIN; + cParams.chainLog = ZSTD_CHAINLOG_MIN; + + cdict = ZSTD_createCDict_advanced(dictBuffer, largeDictSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, cParams, ZSTD_defaultCMem); + ddict = ZSTD_createDDict_advanced(dictBuffer, largeDictSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, ZSTD_defaultCMem); + + ZSTD_CCtx_refCDict(cctx, cdict); + ZSTD_DCtx_refDDict(dctx, ddict); + + sequences[0] = (ZSTD_Sequence) {3, 3, 3, 0}; + sequences[1] = (ZSTD_Sequence) {1 << 25, 0, 3, 0}; + sequences[2] = (ZSTD_Sequence) {1 << 25, 0, 9, 0}; + sequences[3] = (ZSTD_Sequence) {3, 0, 3, 0}; + + cSize = ZSTD_compressSequences(cctx, dst, dstSize, + sequences, kNbSequences, + src, srcSize); + + CHECK(ZSTD_isError(cSize), "Should not throw an error"); + + { + size_t dSize = ZSTD_decompressDCtx(dctx, checkBuf, srcSize, dst, cSize); + CHECK(ZSTD_isError(dSize), "Should not throw an error"); + CHECK(memcmp(src, checkBuf, srcSize) != 0, "Corruption!"); + } + + free(sequences); + free(checkBuf); + free(dictBuffer); + ZSTD_freeCDict(cdict); + ZSTD_freeDDict(ddict); + } + ZSTD_freeCCtx(cctx); + ZSTD_freeDCtx(dctx); + } + DISPLAYLEVEL(3, "OK \n"); + _end: FUZ_freeDictionary(dictionary); ZSTD_freeCStream(zc); @@ -1887,8 +2492,9 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double co /* multi-segments compression test */ XXH64_reset(&xxhState, 0); { ZSTD_outBuffer outBuff = { cBuffer, cBufferSize, 0 } ; - U32 n; - for (n=0, cSize=0, totalTestSize=0 ; totalTestSize < maxTestSize ; n++) { + cSize=0; + totalTestSize=0; + while(totalTestSize < maxTestSize) { /* compress random chunks into randomly sized dst buffers */ { size_t const randomSrcSize = FUZ_randomLength(&lseed, maxSampleLog); size_t const srcSize = MIN(maxTestSize-totalTestSize, randomSrcSize); @@ -2193,7 +2799,7 @@ static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest, /* mess with long distance matching parameters */ if (bigTests) { - if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_enableLongDistanceMatching, FUZ_rand(&lseed) & 63, opaqueAPI) ); + if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_enableLongDistanceMatching, FUZ_randomClampedLength(&lseed, ZSTD_ps_auto, ZSTD_ps_disable), opaqueAPI) ); if (FUZ_rand(&lseed) & 3) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_ldmHashLog, FUZ_randomClampedLength(&lseed, ZSTD_HASHLOG_MIN, 23), opaqueAPI) ); if (FUZ_rand(&lseed) & 3) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_ldmMinMatch, FUZ_randomClampedLength(&lseed, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX), opaqueAPI) ); if (FUZ_rand(&lseed) & 3) CHECK_Z( 
setCCtxParameter(zc, cctxParams, ZSTD_c_ldmBucketSizeLog, FUZ_randomClampedLength(&lseed, ZSTD_LDM_BUCKETSIZELOG_MIN, ZSTD_LDM_BUCKETSIZELOG_MAX), opaqueAPI) ); @@ -2356,6 +2962,9 @@ static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest, CHECK(badParameters(zc, savedParams), "CCtx params are wrong"); /* multi - fragments decompression test */ + if (FUZ_rand(&lseed) & 1) { + CHECK_Z(ZSTD_DCtx_reset(zd, ZSTD_reset_session_and_parameters)); + } if (!dictSize /* don't reset if dictionary : could be different */ && (FUZ_rand(&lseed) & 1)) { DISPLAYLEVEL(5, "resetting DCtx (dict:%p) \n", (void const*)dict); CHECK_Z( ZSTD_resetDStream(zd) ); @@ -2364,6 +2973,9 @@ static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest, DISPLAYLEVEL(5, "using dictionary of size %zu \n", dictSize); CHECK_Z( ZSTD_initDStream_usingDict(zd, dict, dictSize) ); } + if (FUZ_rand(&lseed) & 1) { + CHECK_Z(ZSTD_DCtx_setParameter(zd, ZSTD_d_disableHuffmanAssembly, FUZ_rand(&lseed) & 1)); + } { size_t decompressionResult = 1; ZSTD_inBuffer inBuff = { cBuffer, cSize, 0 }; ZSTD_outBuffer outBuff= { dstBuffer, dstBufferSize, 0 }; @@ -2405,7 +3017,14 @@ static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest, } } /* try decompression on noisy data */ - CHECK_Z( ZSTD_initDStream(zd_noise) ); /* note : no dictionary */ + if (FUZ_rand(&lseed) & 1) { + CHECK_Z(ZSTD_DCtx_reset(zd_noise, ZSTD_reset_session_and_parameters)); + } else { + CHECK_Z(ZSTD_DCtx_reset(zd_noise, ZSTD_reset_session_only)); + } + if (FUZ_rand(&lseed) & 1) { + CHECK_Z(ZSTD_DCtx_setParameter(zd_noise, ZSTD_d_disableHuffmanAssembly, FUZ_rand(&lseed) & 1)); + } { ZSTD_inBuffer inBuff = { cBuffer, cSize, 0 }; ZSTD_outBuffer outBuff= { dstBuffer, dstBufferSize, 0 }; while (outBuff.pos < dstBufferSize) { @@ -2488,6 +3107,7 @@ int main(int argc, const char** argv) if (!strcmp(argument, "--newapi")) { selected_api=advanced_api; testNb += !testNb; continue; } if (!strcmp(argument, "--no-big-tests")) { bigTests=0; continue; } + if (!strcmp(argument, "--big-tests")) { bigTests=1; continue; } argument++; while (*argument!=0) { @@ -2589,7 +3209,7 @@ int main(int argc, const char** argv) if (nbTests<=0) nbTests=1; if (testNb==0) { - result = basicUnitTests(0, ((double)proba) / 100); /* constant seed for predictability */ + result = basicUnitTests(0, ((double)proba) / 100, bigTests); /* constant seed for predictability */ } if (!result) { diff --git a/zlibWrapper/Makefile b/zlibWrapper/Makefile index 6fd5ac3bbec..830b294bb6f 100644 --- a/zlibWrapper/Makefile +++ b/zlibWrapper/Makefile @@ -66,9 +66,10 @@ test: example fitblk example_zstd fitblk_zstd zwrapbench minigzip minigzip_zstd ./zwrapbench -qi1b3B1K $(TEST_FILE) ./zwrapbench -rqi1b1e3 ../lib -#valgrindTest: ZSTDLIBRARY = $(ZSTDLIBDIR)/libzstd.so -valgrindTest: VALGRIND = LD_LIBRARY_PATH=$(ZSTDLIBDIR) valgrind --track-origins=yes --leak-check=full --error-exitcode=1 -valgrindTest: clean example fitblk example_zstd fitblk_zstd zwrapbench +.PHONY: test-valgrind +#test-valgrind: ZSTDLIBRARY = $(ZSTDLIBDIR)/libzstd.so +test-valgrind: VALGRIND = LD_LIBRARY_PATH=$(ZSTDLIBDIR) valgrind --track-origins=yes --leak-check=full --error-exitcode=1 +test-valgrind: clean example fitblk example_zstd fitblk_zstd zwrapbench @echo "\n ---- valgrind tests ----" $(VALGRIND) ./example $(VALGRIND) ./example_zstd diff --git a/zlibWrapper/README.md b/zlibWrapper/README.md index e61767c468f..311e1db7ae6 100644 --- a/zlibWrapper/README.md +++ b/zlibWrapper/README.md @@ -1,7 +1,7 @@ Zstandard wrapper for zlib 
================================ -The main objective of creating a zstd wrapper for [zlib](http://zlib.net/) is to allow a quick and smooth transition to zstd for projects already using zlib. +The main objective of creating a zstd wrapper for [zlib](https://zlib.net/) is to allow a quick and smooth transition to zstd for projects already using zlib. #### Required files @@ -43,7 +43,7 @@ This behavior can be changed using `ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB) #### Example -We have take the file `test/example.c` from [the zlib library distribution](http://zlib.net/) and copied it to [zlibWrapper/examples/example.c](examples/example.c). +We have taken the file `test/example.c` from [the zlib library distribution](https://zlib.net/) and copied it to [zlibWrapper/examples/example.c](examples/example.c). After compilation and execution it shows the following results: ``` zlib version 1.2.8 = 0x1280, compile flags = 0x65 diff --git a/zlibWrapper/examples/example.c b/zlibWrapper/examples/example.c index 9000f7a3295..d7590e31237 100644 --- a/zlibWrapper/examples/example.c +++ b/zlibWrapper/examples/example.c @@ -56,26 +56,26 @@ z_const char hello[] = "hello, hello! I said hello, hello!"; const char dictionary[] = "hello, hello!"; uLong dictId; /* Adler32 value of the dictionary */ -void test_deflate OF((Byte *compr, uLong comprLen)); -void test_inflate OF((Byte *compr, uLong comprLen, +void test_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_deflate OF((Byte *compr, uLong comprLen, +void test_large_deflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_inflate OF((Byte *compr, uLong comprLen, +void test_large_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_flush OF((Byte *compr, uLong *comprLen)); -void test_sync OF((Byte *compr, uLong comprLen, +void test_flush _Z_OF((Byte *compr, uLong *comprLen)); +void test_sync _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_dict_deflate OF((Byte *compr, uLong comprLen)); -void test_dict_inflate OF((Byte *compr, uLong comprLen, +void test_dict_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_dict_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -int main OF((int argc, char *argv[])); +int main _Z_OF((int argc, char *argv[])); #ifdef Z_SOLO -void *myalloc OF((void *, unsigned, unsigned)); -void myfree OF((void *, void *)); +void *myalloc _Z_OF((void *, unsigned, unsigned)); +void myfree _Z_OF((void *, void *)); void *myalloc(q, n, m) void *q; @@ -102,9 +102,9 @@ static free_func zfree = myfree; static alloc_func zalloc = (alloc_func)0; static free_func zfree = (free_func)0; -void test_compress OF((Byte *compr, uLong comprLen, +void test_compress _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_gzio OF((const char *fname, +void test_gzio _Z_OF((const char *fname, Byte *uncompr, uLong uncomprLen)); /* =========================================================================== diff --git a/zlibWrapper/examples/example_original.c b/zlibWrapper/examples/example_original.c index 649882bf0a7..5b4e4d1d8f6 100644 --- a/zlibWrapper/examples/example_original.c +++ b/zlibWrapper/examples/example_original.c @@ -51,26 +51,26 @@ z_const char hello[] = "hello, hello!"; const char dictionary[] = "hello"; uLong dictId; /* Adler32 value of the dictionary */ -void test_deflate 
OF((Byte *compr, uLong comprLen)); -void test_inflate OF((Byte *compr, uLong comprLen, +void test_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_deflate OF((Byte *compr, uLong comprLen, +void test_large_deflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_inflate OF((Byte *compr, uLong comprLen, +void test_large_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_flush OF((Byte *compr, uLong *comprLen)); -void test_sync OF((Byte *compr, uLong comprLen, +void test_flush _Z_OF((Byte *compr, uLong *comprLen)); +void test_sync _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_dict_deflate OF((Byte *compr, uLong comprLen)); -void test_dict_inflate OF((Byte *compr, uLong comprLen, +void test_dict_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_dict_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -int main OF((int argc, char *argv[])); +int main _Z_OF((int argc, char *argv[])); #ifdef Z_SOLO -void *myalloc OF((void *, unsigned, unsigned)); -void myfree OF((void *, void *)); +void *myalloc _Z_OF((void *, unsigned, unsigned)); +void myfree _Z_OF((void *, void *)); void *myalloc(q, n, m) void *q; @@ -94,9 +94,9 @@ static free_func zfree = myfree; static alloc_func zalloc = (alloc_func)0; static free_func zfree = (free_func)0; -void test_compress OF((Byte *compr, uLong comprLen, +void test_compress _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_gzio OF((const char *fname, +void test_gzio _Z_OF((const char *fname, Byte *uncompr, uLong uncomprLen)); /* =========================================================================== diff --git a/zlibWrapper/examples/minigzip.c b/zlibWrapper/examples/minigzip.c index f67be09564f..717a94df970 100644 --- a/zlibWrapper/examples/minigzip.c +++ b/zlibWrapper/examples/minigzip.c @@ -3,7 +3,7 @@ /* minigzip.c -- simulate gzip using the zlib compression library * Copyright (C) 1995-2006, 2010, 2011 Jean-loup Gailly. 
- * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ /* @@ -64,7 +64,7 @@ #if !defined(Z_HAVE_UNISTD_H) && !defined(_LARGEFILE64_SOURCE) #ifndef WIN32 /* unlink already in stdio.h for WIN32 */ - extern int unlink OF((const char *)); + extern int unlink _Z_OF((const char *)); #endif #endif @@ -154,8 +154,8 @@ static void pwinerror (s) # include /* for unlink() */ #endif -void *myalloc OF((void *, unsigned, unsigned)); -void myfree OF((void *, void *)); +void *myalloc _Z_OF((void *, unsigned, unsigned)); +void myfree _Z_OF((void *, void *)); void *myalloc(q, n, m) void *q; @@ -180,9 +180,9 @@ typedef struct gzFile_s { z_stream strm; } *gzFile; -gzFile gzopen OF((const char *, const char *)); -gzFile gzdopen OF((int, const char *)); -gzFile gz_open OF((const char *, int, const char *)); +gzFile gzopen _Z_OF((const char *, const char *)); +gzFile gzdopen _Z_OF((int, const char *)); +gzFile gz_open _Z_OF((const char *, int, const char *)); gzFile gzopen(path, mode) const char *path; @@ -236,7 +236,7 @@ gzFile gz_open(path, fd, mode) return gz; } -int gzwrite OF((gzFile, const void *, unsigned)); +int gzwrite _Z_OF((gzFile, const void *, unsigned)); int gzwrite(gz, buf, len) gzFile gz; @@ -260,7 +260,7 @@ int gzwrite(gz, buf, len) return len; } -int gzread OF((gzFile, void *, unsigned)); +int gzread _Z_OF((gzFile, void *, unsigned)); int gzread(gz, buf, len) gzFile gz; @@ -297,7 +297,7 @@ int gzread(gz, buf, len) return len - strm->avail_out; } -int gzclose OF((gzFile)); +int gzclose _Z_OF((gzFile)); int gzclose(gz) gzFile gz; @@ -326,7 +326,7 @@ int gzclose(gz) return Z_OK; } -const char *gzerror OF((gzFile, int *)); +const char *gzerror _Z_OF((gzFile, int *)); const char *gzerror(gz, err) gzFile gz; @@ -340,15 +340,15 @@ const char *gzerror(gz, err) char *prog; -void error OF((const char *msg)); -void gz_compress OF((FILE *in, gzFile out)); +void error _Z_OF((const char *msg)); +void gz_compress _Z_OF((FILE *in, gzFile out)); #ifdef USE_MMAP -int gz_compress_mmap OF((FILE *in, gzFile out)); +int gz_compress_mmap _Z_OF((FILE *in, gzFile out)); #endif -void gz_uncompress OF((gzFile in, FILE *out)); -void file_compress OF((char *file, char *mode)); -void file_uncompress OF((char *file)); -int main OF((int argc, char *argv[])); +void gz_uncompress _Z_OF((gzFile in, FILE *out)); +void file_compress _Z_OF((char *file, char *mode)); +void file_uncompress _Z_OF((char *file)); +int main _Z_OF((int argc, char *argv[])); /* =========================================================================== * Display error message and exit diff --git a/zlibWrapper/examples/zwrapbench.c b/zlibWrapper/examples/zwrapbench.c index 5993e51ba02..3bc9a1ab0b8 100644 --- a/zlibWrapper/examples/zwrapbench.c +++ b/zlibWrapper/examples/zwrapbench.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -388,7 +388,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize, markNb = (markNb+1) % NB_MARKS; DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r", marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio, - (double)srcSize / fastestC ); + (double)srcSize / (double)fastestC ); (void)fastestD; (void)crcOrig; /* unused when decompression disabled */ #if 1 @@ -428,8 +428,10 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize, ZSTD_DStream* zbd = ZSTD_createDStream(); size_t rSize; if (zbd == NULL) EXM_THROW(1, "ZSTD_createDStream() allocation failure"); - rSize = ZSTD_initDStream_usingDict(zbd, dictBuffer, dictBufferSize); - if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_initDStream() failed : %s", ZSTD_getErrorName(rSize)); + rSize = ZSTD_DCtx_reset(zbd, ZSTD_reset_session_only); + if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_reset() failed : %s", ZSTD_getErrorName(rSize)); + rSize = ZSTD_DCtx_loadDictionary(zbd, dictBuffer, dictBufferSize); + if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_loadDictionary() failed : %s", ZSTD_getErrorName(rSize)); do { U32 blockNb; for (blockNb=0; blockNb%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r", marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio, - (double)srcSize / fastestC, - (double)srcSize / fastestD ); + (double)srcSize / (double)fastestC, + (double)srcSize / (double)fastestD ); /* CRC Checking */ { U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); @@ -558,8 +560,8 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize, } /* for (testNb = 1; testNb <= (g_nbIterations + !g_nbIterations); testNb++) */ if (g_displayLevel == 1) { - double cSpeed = (double)srcSize / fastestC; - double dSpeed = (double)srcSize / fastestD; + double cSpeed = (double)srcSize / (double)fastestC; + double dSpeed = (double)srcSize / (double)fastestD; if (g_additionalParam) DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam); else @@ -836,7 +838,7 @@ static int usage(const char* programName) DISPLAY( " -b# : benchmark file(s), using # compression level (default : %d) \n", ZSTDCLI_CLEVEL_DEFAULT); DISPLAY( " -e# : test all compression levels from -bX to # (default: %d)\n", ZSTDCLI_CLEVEL_DEFAULT); DISPLAY( " -i# : minimum evaluation time in seconds (default : 3s)\n"); - DISPLAY( " -B# : cut file into independent blocks of size # (default: no block)\n"); + DISPLAY( " -B# : cut file into independent chunks of size # (default: no chunking)\n"); return 0; } diff --git a/zlibWrapper/gzclose.c b/zlibWrapper/gzclose.c index 25d3789b128..ba43b8c5e44 100644 --- a/zlibWrapper/gzclose.c +++ b/zlibWrapper/gzclose.c @@ -3,7 +3,7 @@ /* gzclose.c -- zlib gzclose() function * Copyright (C) 2004, 2010 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include "gzguts.h" diff --git a/zlibWrapper/gzcompatibility.h b/zlibWrapper/gzcompatibility.h index c1aa2b87c12..9d11b984fa9 100644 --- a/zlibWrapper/gzcompatibility.h +++ b/zlibWrapper/gzcompatibility.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,10 @@ #if ZLIB_VERNUM <= 0x1240 -ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); -ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); -ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); -ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_r _Z_OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w _Z_OF((gzFile file)); +ZEXTERN int ZEXPORT gzbuffer _Z_OF((gzFile file, unsigned size)); +ZEXTERN z_off_t ZEXPORT gzoffset _Z_OF((gzFile file)); #if !defined(_WIN32) && defined(Z_LARGE64) # define z_off64_t off64_t @@ -40,7 +40,7 @@ struct gzFile_s { #if ZLIB_VERNUM <= 0x1270 #if defined(_WIN32) && !defined(Z_SOLO) # include /* for wchar_t */ -ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, +ZEXTERN gzFile ZEXPORT gzopen_w _Z_OF((const wchar_t *path, const char *mode)); #endif #endif @@ -61,8 +61,8 @@ ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, # endif # undef z_longlong #endif -ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems, +ZEXTERN z_size_t ZEXPORT gzfread _Z_OF((voidp buf, z_size_t size, z_size_t nitems, gzFile file)); -ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size, +ZEXTERN z_size_t ZEXPORT gzfwrite _Z_OF((voidpc buf, z_size_t size, z_size_t nitems, gzFile file)); #endif diff --git a/zlibWrapper/gzguts.h b/zlibWrapper/gzguts.h index b639b4be85f..70a609d724c 100644 --- a/zlibWrapper/gzguts.h +++ b/zlibWrapper/gzguts.h @@ -4,7 +4,7 @@ /* gzguts.h -- zlib internal header definitions for gz* operations * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #ifdef _LARGEFILE64_SOURCE @@ -126,8 +126,8 @@ /* gz* functions always use library allocation functions */ #ifndef STDC - extern voidp malloc OF((uInt size)); - extern void free OF((voidpf ptr)); + extern voidp malloc _Z_OF((uInt size)); + extern void free _Z_OF((voidpf ptr)); #endif /* get errno and strerror definition */ @@ -145,10 +145,10 @@ /* provide prototypes for these when building zlib without LFS */ #if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 - ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); - ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); - ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); - ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN gzFile ZEXPORT gzopen64 _Z_OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 _Z_OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 _Z_OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 _Z_OF((gzFile)); #endif /* default memLevel */ @@ -213,9 +213,9 @@ typedef union { } gz_statep; /* shared functions */ -void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); +void ZLIB_INTERNAL gz_error _Z_OF((gz_statep, int, const char *)); #if defined UNDER_CE -char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); +char ZLIB_INTERNAL *gz_strwinerror _Z_OF((DWORD error)); #endif /* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t @@ -224,6 +224,6 @@ char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); #ifdef INT_MAX # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) #else -unsigned ZLIB_INTERNAL gz_intmax OF((void)); +unsigned ZLIB_INTERNAL gz_intmax _Z_OF((void)); # define 
GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) #endif diff --git a/zlibWrapper/gzlib.c b/zlibWrapper/gzlib.c index b1fb98517e4..eea480a74c9 100644 --- a/zlibWrapper/gzlib.c +++ b/zlibWrapper/gzlib.c @@ -3,7 +3,7 @@ /* gzlib.c -- zlib functions common to reading and writing gzip files * Copyright (C) 2004-2017 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include "gzguts.h" @@ -19,8 +19,8 @@ #endif /* Local functions */ -local void gz_reset OF((gz_statep)); -local gzFile gz_open OF((const void *, int, const char *)); +local void gz_reset _Z_OF((gz_statep)); +local gzFile gz_open _Z_OF((const void *, int, const char *)); #if defined UNDER_CE diff --git a/zlibWrapper/gzread.c b/zlibWrapper/gzread.c index 359d1788913..584fad1eaa8 100644 --- a/zlibWrapper/gzread.c +++ b/zlibWrapper/gzread.c @@ -3,7 +3,7 @@ /* gzread.c -- zlib functions for reading gzip files * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include "gzguts.h" @@ -17,13 +17,13 @@ /* Local functions */ -local int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *)); -local int gz_avail OF((gz_statep)); -local int gz_look OF((gz_statep)); -local int gz_decomp OF((gz_statep)); -local int gz_fetch OF((gz_statep)); -local int gz_skip OF((gz_statep, z_off64_t)); -local z_size_t gz_read OF((gz_statep, voidp, z_size_t)); +local int gz_load _Z_OF((gz_statep, unsigned char *, unsigned, unsigned *)); +local int gz_avail _Z_OF((gz_statep)); +local int gz_look _Z_OF((gz_statep)); +local int gz_decomp _Z_OF((gz_statep)); +local int gz_fetch _Z_OF((gz_statep)); +local int gz_skip _Z_OF((gz_statep, z_off64_t)); +local z_size_t gz_read _Z_OF((gz_statep, voidp, z_size_t)); /* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from state.state->fd, and update state.state->eof, state.state->err, and state.state->msg as appropriate. @@ -464,8 +464,8 @@ z_size_t ZEXPORT gzfread(buf, size, nitems, file) #endif #if ZLIB_VERNUM <= 0x1250 -ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); -ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); +ZEXTERN int ZEXPORT gzgetc _Z_OF((gzFile file)); +ZEXTERN int ZEXPORT gzgetc_ _Z_OF((gzFile file)); #endif int ZEXPORT gzgetc(file) diff --git a/zlibWrapper/gzwrite.c b/zlibWrapper/gzwrite.c index 422ff17db97..ccd4f71f13a 100644 --- a/zlibWrapper/gzwrite.c +++ b/zlibWrapper/gzwrite.c @@ -3,7 +3,7 @@ /* gzwrite.c -- zlib functions for writing gzip files * Copyright (C) 2004-2017 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include @@ -11,10 +11,10 @@ #include "gzguts.h" /* Local functions */ -local int gz_init OF((gz_statep)); -local int gz_comp OF((gz_statep, int)); -local int gz_zero OF((gz_statep, z_off64_t)); -local z_size_t gz_write OF((gz_statep, voidpc, z_size_t)); +local int gz_init _Z_OF((gz_statep)); +local int gz_comp _Z_OF((gz_statep, int)); +local int gz_zero _Z_OF((gz_statep, z_off64_t)); +local z_size_t gz_write _Z_OF((gz_statep, voidpc, z_size_t)); /* Initialize state for writing a gzip file. Mark initialization by setting state.state->size to non-zero. 
Return -1 on a memory allocation failure, or 0 on diff --git a/zlibWrapper/zstd_zlibwrapper.c b/zlibWrapper/zstd_zlibwrapper.c index adb231f0606..479ddd4e006 100644 --- a/zlibWrapper/zstd_zlibwrapper.c +++ b/zlibWrapper/zstd_zlibwrapper.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -101,7 +101,7 @@ ZWRAP_decompress_type ZWRAP_getDecompressionType(void) { return g_ZWRAPdecompres const char * zstdVersion(void) { return ZSTD_VERSION_STRING; } -ZEXTERN const char * ZEXPORT z_zlibVersion OF((void)) { return zlibVersion(); } +ZEXTERN const char * ZEXPORT z_zlibVersion _Z_OF((void)) { return zlibVersion(); } static void* ZWRAP_allocFunction(void* opaque, size_t size) { @@ -260,7 +260,7 @@ static struct internal_state* convert_into_sis(void* ptr) return (struct internal_state*) ptr; } -ZEXTERN int ZEXPORT z_deflateInit_ OF((z_streamp strm, int level, +ZEXTERN int ZEXPORT z_deflateInit_ _Z_OF((z_streamp strm, int level, const char *version, int stream_size)) { ZWRAP_CCtx* zwc; @@ -287,7 +287,7 @@ ZEXTERN int ZEXPORT z_deflateInit_ OF((z_streamp strm, int level, } -ZEXTERN int ZEXPORT z_deflateInit2_ OF((z_streamp strm, int level, int method, +ZEXTERN int ZEXPORT z_deflateInit2_ _Z_OF((z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy, const char *version, int stream_size)) @@ -319,7 +319,7 @@ int ZWRAP_deflateReset_keepDict(z_streamp strm) } -ZEXTERN int ZEXPORT z_deflateReset OF((z_streamp strm)) +ZEXTERN int ZEXPORT z_deflateReset _Z_OF((z_streamp strm)) { LOG_WRAPPERC("- deflateReset\n"); if (!g_ZWRAP_useZSTDcompression) @@ -334,7 +334,7 @@ ZEXTERN int ZEXPORT z_deflateReset OF((z_streamp strm)) } -ZEXTERN int ZEXPORT z_deflateSetDictionary OF((z_streamp strm, +ZEXTERN int ZEXPORT z_deflateSetDictionary _Z_OF((z_streamp strm, const Bytef *dictionary, uInt dictLength)) { @@ -359,7 +359,7 @@ ZEXTERN int ZEXPORT z_deflateSetDictionary OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_deflate OF((z_streamp strm, int flush)) +ZEXTERN int ZEXPORT z_deflate _Z_OF((z_streamp strm, int flush)) { ZWRAP_CCtx* zwc; @@ -465,7 +465,7 @@ ZEXTERN int ZEXPORT z_deflate OF((z_streamp strm, int flush)) } -ZEXTERN int ZEXPORT z_deflateEnd OF((z_streamp strm)) +ZEXTERN int ZEXPORT z_deflateEnd _Z_OF((z_streamp strm)) { if (!g_ZWRAP_useZSTDcompression) { LOG_WRAPPERC("- deflateEnd\n"); @@ -483,7 +483,7 @@ ZEXTERN int ZEXPORT z_deflateEnd OF((z_streamp strm)) } -ZEXTERN uLong ZEXPORT z_deflateBound OF((z_streamp strm, +ZEXTERN uLong ZEXPORT z_deflateBound _Z_OF((z_streamp strm, uLong sourceLen)) { if (!g_ZWRAP_useZSTDcompression) @@ -493,7 +493,7 @@ ZEXTERN uLong ZEXPORT z_deflateBound OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_deflateParams OF((z_streamp strm, +ZEXTERN int ZEXPORT z_deflateParams _Z_OF((z_streamp strm, int level, int strategy)) { @@ -594,7 +594,7 @@ static int ZWRAPD_finishWithErrorMsg(z_streamp strm, char* message) } -ZEXTERN int ZEXPORT z_inflateInit_ OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflateInit_ _Z_OF((z_streamp strm, const char* version, int stream_size)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB) { @@ -623,7 +623,7 @@ ZEXTERN int ZEXPORT z_inflateInit_ OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_inflateInit2_ OF((z_streamp strm, int windowBits, +ZEXTERN int ZEXPORT z_inflateInit2_ _Z_OF((z_streamp strm, int windowBits, const char 
*version, int stream_size)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB) { @@ -660,7 +660,7 @@ int ZWRAP_inflateReset_keepDict(z_streamp strm) } -ZEXTERN int ZEXPORT z_inflateReset OF((z_streamp strm)) +ZEXTERN int ZEXPORT z_inflateReset _Z_OF((z_streamp strm)) { LOG_WRAPPERD("- inflateReset\n"); if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) @@ -678,7 +678,7 @@ ZEXTERN int ZEXPORT z_inflateReset OF((z_streamp strm)) #if ZLIB_VERNUM >= 0x1240 -ZEXTERN int ZEXPORT z_inflateReset2 OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflateReset2 _Z_OF((z_streamp strm, int windowBits)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) @@ -696,7 +696,7 @@ ZEXTERN int ZEXPORT z_inflateReset2 OF((z_streamp strm, #endif -ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflateSetDictionary _Z_OF((z_streamp strm, const Bytef *dictionary, uInt dictLength)) { @@ -706,8 +706,10 @@ ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm, { ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state; if (zwd == NULL || zwd->zbd == NULL) return Z_STREAM_ERROR; - { size_t const initErr = ZSTD_initDStream_usingDict(zwd->zbd, dictionary, dictLength); - if (ZSTD_isError(initErr)) return ZWRAPD_finishWithError(zwd, strm, 0); } + { size_t const resetErr = ZSTD_DCtx_reset(zwd->zbd, ZSTD_reset_session_only); + if (ZSTD_isError(resetErr)) return ZWRAPD_finishWithError(zwd, strm, 0); } + { size_t const loadErr = ZSTD_DCtx_loadDictionary(zwd->zbd, dictionary, dictLength); + if (ZSTD_isError(loadErr)) return ZWRAPD_finishWithError(zwd, strm, 0); } zwd->decompState = ZWRAP_useReset; if (zwd->totalInBytes == ZSTD_HEADERSIZE) { @@ -730,7 +732,7 @@ ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_inflate OF((z_streamp strm, int flush)) +ZEXTERN int ZEXPORT z_inflate _Z_OF((z_streamp strm, int flush)) { ZWRAP_DCtx* zwd; @@ -926,7 +928,7 @@ ZEXTERN int ZEXPORT z_inflate OF((z_streamp strm, int flush)) } -ZEXTERN int ZEXPORT z_inflateEnd OF((z_streamp strm)) +ZEXTERN int ZEXPORT z_inflateEnd _Z_OF((z_streamp strm)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) return inflateEnd(strm); @@ -943,7 +945,7 @@ ZEXTERN int ZEXPORT z_inflateEnd OF((z_streamp strm)) } -ZEXTERN int ZEXPORT z_inflateSync OF((z_streamp strm)) +ZEXTERN int ZEXPORT z_inflateSync _Z_OF((z_streamp strm)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) { return inflateSync(strm); @@ -955,7 +957,7 @@ ZEXTERN int ZEXPORT z_inflateSync OF((z_streamp strm)) /* Advanced compression functions */ -ZEXTERN int ZEXPORT z_deflateCopy OF((z_streamp dest, +ZEXTERN int ZEXPORT z_deflateCopy _Z_OF((z_streamp dest, z_streamp source)) { if (!g_ZWRAP_useZSTDcompression) @@ -964,7 +966,7 @@ ZEXTERN int ZEXPORT z_deflateCopy OF((z_streamp dest, } -ZEXTERN int ZEXPORT z_deflateTune OF((z_streamp strm, +ZEXTERN int ZEXPORT z_deflateTune _Z_OF((z_streamp strm, int good_length, int max_lazy, int nice_length, @@ -977,7 +979,7 @@ ZEXTERN int ZEXPORT z_deflateTune OF((z_streamp strm, #if ZLIB_VERNUM >= 0x1260 -ZEXTERN int ZEXPORT z_deflatePending OF((z_streamp strm, +ZEXTERN int ZEXPORT z_deflatePending _Z_OF((z_streamp strm, unsigned *pending, int *bits)) { @@ -988,7 +990,7 @@ ZEXTERN int ZEXPORT z_deflatePending OF((z_streamp strm, #endif -ZEXTERN int ZEXPORT z_deflatePrime OF((z_streamp strm, +ZEXTERN int ZEXPORT z_deflatePrime _Z_OF((z_streamp strm, int bits, int value)) { @@ -998,7 +1000,7 @@ ZEXTERN int ZEXPORT 
z_deflatePrime OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_deflateSetHeader OF((z_streamp strm, +ZEXTERN int ZEXPORT z_deflateSetHeader _Z_OF((z_streamp strm, gz_headerp head)) { if (!g_ZWRAP_useZSTDcompression) @@ -1011,7 +1013,7 @@ ZEXTERN int ZEXPORT z_deflateSetHeader OF((z_streamp strm, /* Advanced decompression functions */ #if ZLIB_VERNUM >= 0x1280 -ZEXTERN int ZEXPORT z_inflateGetDictionary OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflateGetDictionary _Z_OF((z_streamp strm, Bytef *dictionary, uInt *dictLength)) { @@ -1022,7 +1024,7 @@ ZEXTERN int ZEXPORT z_inflateGetDictionary OF((z_streamp strm, #endif -ZEXTERN int ZEXPORT z_inflateCopy OF((z_streamp dest, +ZEXTERN int ZEXPORT z_inflateCopy _Z_OF((z_streamp dest, z_streamp source)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !source->reserved) @@ -1032,7 +1034,7 @@ ZEXTERN int ZEXPORT z_inflateCopy OF((z_streamp dest, #if ZLIB_VERNUM >= 0x1240 -ZEXTERN long ZEXPORT z_inflateMark OF((z_streamp strm)) +ZEXTERN long ZEXPORT z_inflateMark _Z_OF((z_streamp strm)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) return inflateMark(strm); @@ -1041,7 +1043,7 @@ ZEXTERN long ZEXPORT z_inflateMark OF((z_streamp strm)) #endif -ZEXTERN int ZEXPORT z_inflatePrime OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflatePrime _Z_OF((z_streamp strm, int bits, int value)) { @@ -1051,7 +1053,7 @@ ZEXTERN int ZEXPORT z_inflatePrime OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_inflateGetHeader OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflateGetHeader _Z_OF((z_streamp strm, gz_headerp head)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) @@ -1060,7 +1062,7 @@ ZEXTERN int ZEXPORT z_inflateGetHeader OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_inflateBackInit_ OF((z_streamp strm, int windowBits, +ZEXTERN int ZEXPORT z_inflateBackInit_ _Z_OF((z_streamp strm, int windowBits, unsigned char FAR *window, const char *version, int stream_size)) @@ -1071,7 +1073,7 @@ ZEXTERN int ZEXPORT z_inflateBackInit_ OF((z_streamp strm, int windowBits, } -ZEXTERN int ZEXPORT z_inflateBack OF((z_streamp strm, +ZEXTERN int ZEXPORT z_inflateBack _Z_OF((z_streamp strm, in_func in, void FAR *in_desc, out_func out, void FAR *out_desc)) { @@ -1081,7 +1083,7 @@ ZEXTERN int ZEXPORT z_inflateBack OF((z_streamp strm, } -ZEXTERN int ZEXPORT z_inflateBackEnd OF((z_streamp strm)) +ZEXTERN int ZEXPORT z_inflateBackEnd _Z_OF((z_streamp strm)) { if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) return inflateBackEnd(strm); @@ -1089,14 +1091,14 @@ ZEXTERN int ZEXPORT z_inflateBackEnd OF((z_streamp strm)) } -ZEXTERN uLong ZEXPORT z_zlibCompileFlags OF((void)) { return zlibCompileFlags(); } +ZEXTERN uLong ZEXPORT z_zlibCompileFlags _Z_OF((void)) { return zlibCompileFlags(); } /* === utility functions === */ #ifndef Z_SOLO -ZEXTERN int ZEXPORT z_compress OF((Bytef *dest, uLongf *destLen, +ZEXTERN int ZEXPORT z_compress _Z_OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)) { if (!g_ZWRAP_useZSTDcompression) @@ -1115,7 +1117,7 @@ ZEXTERN int ZEXPORT z_compress OF((Bytef *dest, uLongf *destLen, } -ZEXTERN int ZEXPORT z_compress2 OF((Bytef *dest, uLongf *destLen, +ZEXTERN int ZEXPORT z_compress2 _Z_OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen, int level)) { @@ -1131,7 +1133,7 @@ ZEXTERN int ZEXPORT z_compress2 OF((Bytef *dest, uLongf *destLen, } -ZEXTERN uLong ZEXPORT z_compressBound OF((uLong sourceLen)) +ZEXTERN uLong ZEXPORT z_compressBound _Z_OF((uLong sourceLen)) { if 
(!g_ZWRAP_useZSTDcompression) return compressBound(sourceLen); @@ -1140,7 +1142,7 @@ ZEXTERN uLong ZEXPORT z_compressBound OF((uLong sourceLen)) } -ZEXTERN int ZEXPORT z_uncompress OF((Bytef *dest, uLongf *destLen, +ZEXTERN int ZEXPORT z_uncompress _Z_OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)) { if (!ZSTD_isFrame(source, sourceLen)) @@ -1159,24 +1161,24 @@ ZEXTERN int ZEXPORT z_uncompress OF((Bytef *dest, uLongf *destLen, /* checksum functions */ -ZEXTERN uLong ZEXPORT z_adler32 OF((uLong adler, const Bytef *buf, uInt len)) +ZEXTERN uLong ZEXPORT z_adler32 _Z_OF((uLong adler, const Bytef *buf, uInt len)) { return adler32(adler, buf, len); } -ZEXTERN uLong ZEXPORT z_crc32 OF((uLong crc, const Bytef *buf, uInt len)) +ZEXTERN uLong ZEXPORT z_crc32 _Z_OF((uLong crc, const Bytef *buf, uInt len)) { return crc32(crc, buf, len); } #if ZLIB_VERNUM >= 0x12B0 -ZEXTERN uLong ZEXPORT z_adler32_z OF((uLong adler, const Bytef *buf, z_size_t len)) +ZEXTERN uLong ZEXPORT z_adler32_z _Z_OF((uLong adler, const Bytef *buf, z_size_t len)) { return adler32_z(adler, buf, len); } -ZEXTERN uLong ZEXPORT z_crc32_z OF((uLong crc, const Bytef *buf, z_size_t len)) +ZEXTERN uLong ZEXPORT z_crc32_z _Z_OF((uLong crc, const Bytef *buf, z_size_t len)) { return crc32_z(crc, buf, len); } @@ -1184,14 +1186,14 @@ ZEXTERN uLong ZEXPORT z_crc32_z OF((uLong crc, const Bytef *buf, z_size_t len)) #if ZLIB_VERNUM >= 0x1270 -ZEXTERN const z_crc_t FAR * ZEXPORT z_get_crc_table OF((void)) +ZEXTERN const z_crc_t FAR * ZEXPORT z_get_crc_table _Z_OF((void)) { return get_crc_table(); } #endif /* Error function */ -ZEXTERN const char * ZEXPORT z_zError OF((int err)) +ZEXTERN const char * ZEXPORT z_zError _Z_OF((int err)) { /* Just use zlib Error function */ return zError(err); diff --git a/zlibWrapper/zstd_zlibwrapper.h b/zlibWrapper/zstd_zlibwrapper.h index 042ab9f84fd..230bf8411b0 100644 --- a/zlibWrapper/zstd_zlibwrapper.h +++ b/zlibWrapper/zstd_zlibwrapper.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,6 +25,9 @@ extern "C" { #define z_const #endif +#if !defined(_Z_OF) + #define _Z_OF OF +#endif /* returns a string with version of zstd library */ const char * zstdVersion(void);
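To close the loop on the wrapper changes: the new `_Z_OF` macro defaults to zlib's `OF()` prototype macro unless the including project defines it otherwise, and the decompression path now goes through `ZSTD_DCtx_reset()` plus `ZSTD_DCtx_loadDictionary()` instead of the older `ZSTD_initDStream_usingDict()`. The sketch below shows, for orientation only, how a zlib-based program typically drives the wrapper at runtime; `ZWRAP_useZSTDcompression()` and the `ZWRAP_AUTO` decompression type are assumed from the rest of zstd_zlibwrapper.h and do not appear in this diff.

```c
/* Orientation sketch (not part of the patch): a zlib-based program recompiled
 * against the wrapper.  Only the two ZWRAP_* toggles are wrapper-specific and are
 * assumed from zstd_zlibwrapper.h; every deflate()/inflate() call stays as it was. */
#include <stdio.h>
#include "zstd_zlibwrapper.h"   /* used in place of <zlib.h> */

int main(void)
{
    printf("zlib %s / zstd %s\n", zlibVersion(), zstdVersion());

    ZWRAP_useZSTDcompression(1);             /* assumed toggle: deflate*() now produces zstd frames */
    ZWRAP_setDecompressionType(ZWRAP_AUTO);  /* assumed toggle: inflate*() detects zstd vs zlib input */

    /* ... existing, unchanged zlib compression/decompression code ... */
    return 0;
}
```

Calling `ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB)` instead restores plain zlib decompression, which is the switch the wrapper README refers to.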