diff -Nru snappy-1.1.9/.appveyor.yml snappy-1.1.10/.appveyor.yml --- snappy-1.1.9/.appveyor.yml 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -# Build matrix / environment variables are explained on: -# https://www.appveyor.com/docs/appveyor-yml/ -# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml - -version: "{build}" - -environment: - matrix: - # AppVeyor currently has no custom job name feature. - # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs - - JOB: Visual Studio 2019 - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - CMAKE_GENERATOR: Visual Studio 16 2019 - -platform: - - x86 - - x64 - -configuration: - - RelWithDebInfo - - Debug - -build_script: - - git submodule update --init --recursive - - mkdir build - - cd build - - if "%platform%"=="x86" (set CMAKE_GENERATOR_PLATFORM="Win32") - else (set CMAKE_GENERATOR_PLATFORM="%platform%") - - cmake --version - - cmake .. -G "%CMAKE_GENERATOR%" -A "%CMAKE_GENERATOR_PLATFORM%" - -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%" -DSNAPPY_REQUIRE_AVX2=ON - - cmake --build . --config %CONFIGURATION% - - cd .. - -test_script: - - build\%CONFIGURATION%\snappy_unittest - - build\%CONFIGURATION%\snappy_benchmark diff -Nru snappy-1.1.9/cmake/config.h.in snappy-1.1.10/cmake/config.h.in --- snappy-1.1.9/cmake/config.h.in 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/cmake/config.h.in 2023-03-08 23:44:00.000000000 +0000 @@ -2,55 +2,65 @@ #define THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_ /* Define to 1 if the compiler supports __attribute__((always_inline)). */ -#cmakedefine HAVE_ATTRIBUTE_ALWAYS_INLINE 1 +#cmakedefine01 HAVE_ATTRIBUTE_ALWAYS_INLINE /* Define to 1 if the compiler supports __builtin_ctz and friends. */ -#cmakedefine HAVE_BUILTIN_CTZ 1 +#cmakedefine01 HAVE_BUILTIN_CTZ /* Define to 1 if the compiler supports __builtin_expect. */ -#cmakedefine HAVE_BUILTIN_EXPECT 1 +#cmakedefine01 HAVE_BUILTIN_EXPECT /* Define to 1 if you have a definition for mmap() in . */ -#cmakedefine HAVE_FUNC_MMAP 1 +#cmakedefine01 HAVE_FUNC_MMAP /* Define to 1 if you have a definition for sysconf() in . */ -#cmakedefine HAVE_FUNC_SYSCONF 1 +#cmakedefine01 HAVE_FUNC_SYSCONF /* Define to 1 if you have the `lzo2' library (-llzo2). */ -#cmakedefine HAVE_LIBLZO2 1 +#cmakedefine01 HAVE_LIBLZO2 /* Define to 1 if you have the `z' library (-lz). */ -#cmakedefine HAVE_LIBZ 1 +#cmakedefine01 HAVE_LIBZ /* Define to 1 if you have the `lz4' library (-llz4). */ -#cmakedefine HAVE_LIBLZ4 1 +#cmakedefine01 HAVE_LIBLZ4 /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_MMAN_H 1 +#cmakedefine01 HAVE_SYS_MMAN_H /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_RESOURCE_H 1 +#cmakedefine01 HAVE_SYS_RESOURCE_H /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_TIME_H 1 +#cmakedefine01 HAVE_SYS_TIME_H /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_UIO_H 1 +#cmakedefine01 HAVE_SYS_UIO_H /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_UNISTD_H 1 +#cmakedefine01 HAVE_UNISTD_H /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_WINDOWS_H 1 +#cmakedefine01 HAVE_WINDOWS_H /* Define to 1 if you target processors with SSSE3+ and have . */ #cmakedefine01 SNAPPY_HAVE_SSSE3 +/* Define to 1 if you target processors with SSE4.2 and have . 
*/ +#cmakedefine01 SNAPPY_HAVE_X86_CRC32 + /* Define to 1 if you target processors with BMI2+ and have . */ #cmakedefine01 SNAPPY_HAVE_BMI2 +/* Define to 1 if you target processors with NEON and have . */ +#cmakedefine01 SNAPPY_HAVE_NEON + +/* Define to 1 if you have and and want to optimize + compression speed by using __crc32cw from . */ +#cmakedefine01 SNAPPY_HAVE_NEON_CRC32 + /* Define to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel and VAX). */ -#cmakedefine SNAPPY_IS_BIG_ENDIAN 1 +#cmakedefine01 SNAPPY_IS_BIG_ENDIAN #endif // THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_ diff -Nru snappy-1.1.9/CMakeLists.txt snappy-1.1.10/CMakeLists.txt --- snappy-1.1.9/CMakeLists.txt 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/CMakeLists.txt 2023-03-08 23:44:00.000000000 +0000 @@ -27,7 +27,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmake_minimum_required(VERSION 3.1) -project(Snappy VERSION 1.1.9 LANGUAGES C CXX) +project(Snappy VERSION 1.1.10 LANGUAGES C CXX) # C++ standard can be overridden when this is used as a sub-project. if(NOT CMAKE_CXX_STANDARD) @@ -175,9 +175,31 @@ check_cxx_source_compiles(" #include int main() { + return _mm_crc32_u32(0, 1); +}" SNAPPY_HAVE_X86_CRC32) + +check_cxx_source_compiles(" +#include +#include +int main() { + return __crc32cw(0, 1); +}" SNAPPY_HAVE_NEON_CRC32) + +check_cxx_source_compiles(" +#include +int main() { return _bzhi_u32(0, 1); }" SNAPPY_HAVE_BMI2) +check_cxx_source_compiles(" +#include +int main() { + uint8_t val = 3, dup[8]; + uint8x16_t v = vld1q_dup_u8(&val); + vst1q_u8(dup, v); + return 0; +}" SNAPPY_HAVE_NEON) + include(CheckSymbolExists) check_symbol_exists("mmap" "sys/mman.h" HAVE_FUNC_MMAP) check_symbol_exists("sysconf" "unistd.h" HAVE_FUNC_SYSCONF) diff -Nru snappy-1.1.9/CONTRIBUTING.md snappy-1.1.10/CONTRIBUTING.md --- snappy-1.1.9/CONTRIBUTING.md 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/CONTRIBUTING.md 2023-03-08 23:44:00.000000000 +0000 @@ -3,30 +3,10 @@ We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. -## Project Goals - -In addition to the aims listed at the top of the [README](README.md) Snappy -explicitly supports the following: - -1. C++11 -2. Clang (gcc and MSVC are best-effort). -3. Low level optimizations (e.g. assembly or equivalent intrinsics) for: - 1. [x86](https://en.wikipedia.org/wiki/X86) - 2. [x86-64](https://en.wikipedia.org/wiki/X86-64) - 3. ARMv7 (32-bit) - 4. ARMv8 (AArch64) -4. Supports only the Snappy compression scheme as described in - [format_description.txt](format_description.txt). -5. CMake for building - -Changes adding features or dependencies outside of the core area of focus listed -above might not be accepted. If in doubt post a message to the -[Snappy discussion mailing list](https://groups.google.com/g/snappy-compression). - ## Contributor License Agreement Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, +Agreement. You (or your employer) retain the copyright to your contribution; this simply gives us permission to use and redistribute your contributions as part of the project. Head over to to see your current agreements on file or to sign a new one. @@ -35,12 +15,17 @@ (even if it was for a different project), you probably don't need to do it again. 
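The config.h.in hunk above replaces plain `#cmakedefine ... 1` with `#cmakedefine01`. A minimal sketch of the difference follows, using `HAVE_SYS_UIO_H` purely as an example macro: with `#cmakedefine01`, CMake's `configure_file()` always emits a definition (0 or 1), so the sources can test the macro with `#if` without relying on the undefined-macro-is-0 rule or tripping `-Wundef`.

```cpp
// What configure_file() writes into config.h when the feature test FAILS
// (illustration only, not the actual generated file):
//
//   old template line:  #cmakedefine HAVE_SYS_UIO_H 1
//   generated output:   /* #undef HAVE_SYS_UIO_H */   -> macro left undefined
//
//   new template line:  #cmakedefine01 HAVE_SYS_UIO_H
//   generated output:   #define HAVE_SYS_UIO_H 0      -> macro always defined
#define HAVE_SYS_UIO_H 0  // as produced by #cmakedefine01 on a failed check

// Consumers can now use a plain #if test; with the old form this would rely
// on undefined macros evaluating to 0 and would warn under -Wundef.
#if HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

int main() { return HAVE_SYS_UIO_H; }  // compiles either way; returns 0 here
```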
-## Code reviews +## Code Reviews All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. -Please make sure that all the automated checks (CLA, AppVeyor, Travis) pass for -your pull requests. Pull requests whose checks fail may be ignored. +See [the README](README.md#contributing-to-the-snappy-project) for areas +where we are likely to accept external contributions. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google/conduct/). diff -Nru snappy-1.1.9/debian/changelog snappy-1.1.10/debian/changelog --- snappy-1.1.9/debian/changelog 2022-09-14 18:28:10.000000000 +0000 +++ snappy-1.1.10/debian/changelog 2023-09-18 03:43:22.000000000 +0000 @@ -1,19 +1,28 @@ -snappy (1.1.9-2+20.04.sav1) focal; urgency=medium +snappy (1.1.10-1~20.04.sav0) focal; urgency=medium + * Backport to Focal + * debian/rules: Add override_dh_auto_test, add verbose arg (compat < 13) + * debian/control: Set debhelper-compat (= 12) BD * Skip building benchmark tests on i386 (not on Launchpad i386 whitelist): - debian/control: Set libbenchmark-dev [!i386] BD - debian/rules: Add conditional and config for benchmarks on non-i386 - -- Rob Savoury Wed, 14 Sep 2022 11:28:10 -0700 + -- Rob Savoury Sun, 17 Sep 2023 20:43:22 -0700 -snappy (1.1.9-2+20.04.sav0) focal; urgency=medium +snappy (1.1.10-1) unstable; urgency=medium - * Backport to Focal - * debian/patches/: Add fix-ptr-offset-overflow.patch (pull !148) - * debian/rules: Add override_dh_auto_test, add verbose arg (compat < 13) - * debian/control: Set debhelper-compat (= 12) BD + * New upstream release. + * Configure with C++14 standard (closes: #1041028). + + -- Laszlo Boszormenyi (GCS) Mon, 17 Jul 2023 21:07:55 +0200 + +snappy (1.1.9-3) unstable; urgency=medium + + * Add option to enable rtti, set default to current behavior. + * Enable rtti support (closes: 1031394). + * Update Standards-Version to 4.6.2 . - -- Rob Savoury Wed, 14 Sep 2022 10:23:51 -0700 + -- Laszlo Boszormenyi (GCS) Thu, 16 Feb 2023 20:31:32 +0100 snappy (1.1.9-2) unstable; urgency=medium diff -Nru snappy-1.1.9/debian/control snappy-1.1.10/debian/control --- snappy-1.1.9/debian/control 2022-09-14 18:12:06.000000000 +0000 +++ snappy-1.1.10/debian/control 2023-09-18 03:43:22.000000000 +0000 @@ -1,9 +1,9 @@ Source: snappy Priority: optional Maintainer: Laszlo Boszormenyi (GCS) -Build-Depends: debhelper-compat (= 12), cmake, pkg-config, +Build-Depends: debhelper-compat (= 12), cmake, pkgconf, libgtest-dev, libgmock-dev, libbenchmark-dev [!i386] -Standards-Version: 4.6.0 +Standards-Version: 4.6.2 Section: libs Homepage: https://google.github.io/snappy/ diff -Nru snappy-1.1.9/debian/patches/0001-Add-inline-with-SNAPPY_ATTRIBUTE_ALWAYS_INLINE.patch snappy-1.1.10/debian/patches/0001-Add-inline-with-SNAPPY_ATTRIBUTE_ALWAYS_INLINE.patch --- snappy-1.1.9/debian/patches/0001-Add-inline-with-SNAPPY_ATTRIBUTE_ALWAYS_INLINE.patch 2021-12-04 18:21:57.000000000 +0000 +++ snappy-1.1.10/debian/patches/0001-Add-inline-with-SNAPPY_ATTRIBUTE_ALWAYS_INLINE.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -From 4728803cc8687431449c8c9fbfabb1da04943400 Mon Sep 17 00:00:00 2001 -From: "Georgi D. 
Sotirov" -Date: Wed, 5 May 2021 14:16:46 +0300 -Subject: [PATCH] Add inline with SNAPPY_ATTRIBUTE_ALWAYS_INLINE - -Add inline with SNAPPY_ATTRIBUTE_ALWAYS_INLINE on AdvanceToNextTag to -fix the following compilation errors and a warning with GCC: - -[ 2%] Building CXX object CMakeFiles/snappy.dir/snappy.cc.o -/usr/bin/c++ -DHAVE_CONFIG_H -Dsnappy_EXPORTS --I/tmp/snappy-1.1.9/build -I/tmp/snappy-1.1.9 -O3 --march=i586 -mtune=i686 -Wall -Wextra -fno-exceptions -fno-rtti -O3 --DNDEBUG -fPIC -std=c++11 -o CMakeFiles/snappy.dir/snappy.cc.o -c -/tmp/snappy-1.1.9/snappy.cc -/tmp/snappy-1.1.9/snappy.cc:1017:8: warning: always_inline -function might not be inlinable [-Wattributes] - size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) { - ^ -/tmp/snappy-1.1.9/snappy.cc: In function 'std::pair snappy::DecompressBranchless(const uint8_t*, const -uint8_t*, ptrdiff_t, T, ptrdiff_t) [with T = char*; uint8_t = unsigned -char; ptrdiff_t = int]': -/tmp/snappy-1.1.9/snappy.cc:1017:8: error: inlining failed in -call to always_inline 'size_t snappy::AdvanceToNextTag(const uint8_t**, -size_t*)': function body can be overwritten at link time -/tmp/snappy-1.1.9/snappy.cc:1097:53: error: called from here - size_t tag_type = AdvanceToNextTag(&ip, &tag); - ^ -/tmp/snappy-1.1.9/snappy.cc:1017:8: error: inlining failed in -call to always_inline 'size_t snappy::AdvanceToNextTag(const uint8_t**, -size_t*)': function body can be overwritten at link time - size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) { - ^ -/tmp/snappy-1.1.9/snappy.cc:1097:53: error: called from here - size_t tag_type = AdvanceToNextTag(&ip, &tag); - ^ -/tmp/snappy-1.1.9/snappy.cc:1017:8: error: inlining failed in -call to always_inline 'size_t snappy::AdvanceToNextTag(const uint8_t**, -size_t*)': function body can be overwritten at link time - size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) { - ^ -/tmp/snappy-1.1.9/snappy.cc:1097:53: error: called from here - size_t tag_type = AdvanceToNextTag(&ip, &tag); - ^ -CMakeFiles/snappy.dir/build.make:137: recipe for target -'CMakeFiles/snappy.dir/snappy.cc.o' failed - -Just like with other functions using SNAPPY_ATTRIBUTE_ALWAYS_INLINE -macro (i.e. __attribute__((always_inline)) ) it is necessary to use C++ -inline specifier. - -Signed-off-by: Martin Jansa - -Upstream-Status: Submitted [https://github.com/google/snappy/pull/128] ---- - snappy.cc | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/snappy.cc b/snappy.cc -index 79dc0e8..51157be 100644 ---- a/snappy.cc -+++ b/snappy.cc -@@ -1014,7 +1014,7 @@ void MemMove(ptrdiff_t dst, const void* src, size_t size) { - } - - SNAPPY_ATTRIBUTE_ALWAYS_INLINE --size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) { -+inline size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) { - const uint8_t*& ip = *ip_p; - // This section is crucial for the throughput of the decompression loop. 
- // The latency of an iteration is fundamentally constrained by the diff -Nru snappy-1.1.9/debian/patches/add_option_to_enable_rtti.patch snappy-1.1.10/debian/patches/add_option_to_enable_rtti.patch --- snappy-1.1.9/debian/patches/add_option_to_enable_rtti.patch 1970-01-01 00:00:00.000000000 +0000 +++ snappy-1.1.10/debian/patches/add_option_to_enable_rtti.patch 2023-02-16 19:28:24.000000000 +0000 @@ -0,0 +1,48 @@ +From 376f14b5933e91d08ade6a503fdd657a6e01a149 Mon Sep 17 00:00:00 2001 +From: Max +Date: Wed, 24 Nov 2021 12:16:23 -0600 +Subject: [PATCH] add option to enable rtti, set default to current behavior + +--- + CMakeLists.txt | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 6eef485c..aefd35bc 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -53,8 +53,10 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + add_definitions(-D_HAS_EXCEPTIONS=0) + + # Disable RTTI. +- string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") +- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") ++ if(NOT SNAPPY_ENABLE_RTTI) ++ string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") ++ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") ++ endif(SNAPPY_ENABLE_RTTI) + else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # Use -Wall for clang and gcc. + if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall") +@@ -78,8 +80,10 @@ else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions") + + # Disable RTTI. +- string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") +- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") ++ if(NOT SNAPPY_ENABLE_RTTI) ++ string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") ++ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") ++ endif(SNAPPY_ENABLE_RTTI) + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + + # BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to make +@@ -98,6 +102,8 @@ option(SNAPPY_REQUIRE_AVX2 "Target processors with AVX2 support." 
OFF) + + option(SNAPPY_INSTALL "Install Snappy's header and library" ON) + ++option(SNAPPY_ENABLE_RTTI "Enable RTTI for Snappy's library" OFF) ++ + include(TestBigEndian) + test_big_endian(SNAPPY_IS_BIG_ENDIAN) + diff -Nru snappy-1.1.9/debian/patches/fix-ptr-offset-overflow.patch snappy-1.1.10/debian/patches/fix-ptr-offset-overflow.patch --- snappy-1.1.9/debian/patches/fix-ptr-offset-overflow.patch 2022-09-14 16:49:04.000000000 +0000 +++ snappy-1.1.10/debian/patches/fix-ptr-offset-overflow.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -From 64df9f28c8452500506af3361dd079e78f736ad5 Mon Sep 17 00:00:00 2001 -From: Antoine Pitrou -Date: Tue, 30 Nov 2021 19:46:18 +0100 -Subject: [PATCH] Fix UBSan error (ptr + offset overflow) - -As `i + offset` is promoted to a "negative" size_t, -UBSan would complain when adding the resulting offset to `dst`: -``` -/tmp/RtmptDX1SS/file584e37df4e/snappy_ep-prefix/src/snappy_ep/snappy.cc:343:43: runtime error: addition of unsigned offset to 0x6120003c5ec1 overflowed to 0x6120003c5ec0 - #0 0x7f9ebd21769c in snappy::(anonymous namespace)::Copy64BytesWithPatternExtension(char*, unsigned long) /tmp/RtmptDX1SS/file584e37df4e/snappy_ep-prefix/src/snappy_ep/snappy.cc:343:43 - #1 0x7f9ebd21769c in std::__1::pair snappy::DecompressBranchless(unsigned char const*, unsigned char const*, long, char*, long) /tmp/RtmptDX1SS/file584e37df4e/snappy_ep-prefix/src/snappy_ep/snappy.cc:1160:15 -``` ---- - snappy.cc | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/snappy.cc b/snappy.cc -index ee9a2c40..bb9e0e5d 100644 ---- a/snappy.cc -+++ b/snappy.cc -@@ -340,7 +340,7 @@ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) { - if (SNAPPY_PREDICT_TRUE(offset < 16)) { - if (SNAPPY_PREDICT_FALSE(offset == 0)) return false; - // Extend the pattern to the first 16 bytes. -- for (int i = 0; i < 16; i++) dst[i] = dst[i - offset]; -+ for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i]; - // Find a multiple of pattern >= 16. 
- static std::array pattern_sizes = []() { - std::array res; diff -Nru snappy-1.1.9/debian/patches/series snappy-1.1.10/debian/patches/series --- snappy-1.1.9/debian/patches/series 2022-09-14 17:23:41.000000000 +0000 +++ snappy-1.1.10/debian/patches/series 2023-07-17 19:07:55.000000000 +0000 @@ -1,7 +1,6 @@ build_static_lib.patch readd_pkgconfig_support.patch fix_snappy_unittest_FTBFS.patch -0001-Add-inline-with-SNAPPY_ATTRIBUTE_ALWAYS_INLINE.patch use_packaged_testing.patch correct_testing_link.patch -fix-ptr-offset-overflow.patch +add_option_to_enable_rtti.patch diff -Nru snappy-1.1.9/debian/rules snappy-1.1.10/debian/rules --- snappy-1.1.9/debian/rules 2022-09-14 18:27:15.000000000 +0000 +++ snappy-1.1.10/debian/rules 2023-09-18 03:43:22.000000000 +0000 @@ -14,9 +14,13 @@ override_dh_auto_configure: dh_auto_configure -- \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_LIBDIR=/usr/lib/${DEB_HOST_MULTIARCH} -DBUILD_SHARED_LIBS=on \ - -DSNAPPY_USE_BUNDLED_GTEST=OFF \ - -DSNAPPY_BUILD_BENCHMARKS=$(BENCHMARKS) -DSNAPPY_USE_BUNDLED_BENCHMARK_LIB=OFF + -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DCMAKE_INSTALL_LIBDIR=/usr/lib/${DEB_HOST_MULTIARCH} -DBUILD_SHARED_LIBS=ON \ + -DSNAPPY_ENABLE_RTTI=ON \ + -DSNAPPY_USE_BUNDLED_GTEST=OFF \ + -DSNAPPY_BUILD_BENCHMARKS=$(BENCHMARKS) \ + -DSNAPPY_USE_BUNDLED_BENCHMARK_LIB=OFF override_dh_auto_test: dh_auto_test -- ARGS\+=--verbose diff -Nru snappy-1.1.9/debian/watch snappy-1.1.10/debian/watch --- snappy-1.1.9/debian/watch 2021-12-04 18:21:57.000000000 +0000 +++ snappy-1.1.10/debian/watch 2023-07-17 19:07:55.000000000 +0000 @@ -2,4 +2,4 @@ opts=uversionmangle=s/^(SNAPSHOT|RELEASE)\.//;s/(\d+)[_-](\d+)[_-](\d+)/\$1.\$2.\$3/;s/(\d+)[_-](\d+)/\$1.\$2/;s/(\d)[_\.\-\+]?((rc|pre|dev|beta|alpha|b|a)[\-\.]?\d*)$/$1~$2/i,\ dversionmangle=s/\+(debian|dfsg|ds|deb)(\.?\d+)?$//i \ https://github.com/google/snappy/tags \ -(?:|.*/)v?(\d\S*)@ARCHIVE_EXT@ +(?:|.*/)[vV]?(\d\S*)@ARCHIVE_EXT@ diff -Nru snappy-1.1.9/.github/workflows/build.yml snappy-1.1.10/.github/workflows/build.yml --- snappy-1.1.9/.github/workflows/build.yml 1970-01-01 00:00:00.000000000 +0000 +++ snappy-1.1.10/.github/workflows/build.yml 2023-03-08 23:44:00.000000000 +0000 @@ -0,0 +1,135 @@ +# Copyright 2021 Google Inc. All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +name: ci +on: [push, pull_request] + +permissions: + contents: read + +jobs: + build-and-test: + name: >- + CI + ${{ matrix.os }} + ${{ matrix.cpu_level }} + ${{ matrix.compiler }} + ${{ matrix.optimized && 'release' || 'debug' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + compiler: [clang, gcc, msvc] + os: [ubuntu-latest, macos-latest, windows-latest] + cpu_level: [baseline, avx, avx2] + optimized: [true, false] + exclude: + # MSVC only works on Windows. + - os: ubuntu-latest + compiler: msvc + - os: macos-latest + compiler: msvc + # GitHub servers seem to run on pre-Haswell CPUs. Attempting to use AVX2 + # results in crashes. + - os: macos-latest + cpu_level: avx2 + # Not testing with GCC on macOS. + - os: macos-latest + compiler: gcc + # Only testing with MSVC on Windows. + - os: windows-latest + compiler: clang + - os: windows-latest + compiler: gcc + include: + - compiler: clang + CC: clang + CXX: clang++ + - compiler: gcc + CC: gcc + CXX: g++ + - compiler: msvc + CC: + CXX: + + env: + CMAKE_BUILD_DIR: ${{ github.workspace }}/build + CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }} + CC: ${{ matrix.CC }} + CXX: ${{ matrix.CXX }} + SNAPPY_REQUIRE_AVX: ${{ matrix.cpu_level == 'baseline' && '0' || '1' }} + SNAPPY_REQUIRE_AVX2: ${{ matrix.cpu_level == 'avx2' && '1' || '0' }} + SNAPPY_FUZZING_BUILD: >- + ${{ (startsWith(matrix.os, 'ubuntu') && matrix.compiler == 'clang' && + !matrix.optimized) && '1' || '0' }} + BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }} + BINARY_PATH: >- + ${{ format( + startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/', + github.workspace, + matrix.optimized && 'RelWithDebInfo' || 'Debug') }} + + steps: + - uses: actions/checkout@v2 + with: + submodules: true + + - name: Generate build config + run: >- + cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}" + -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }} + -DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/ + -DSNAPPY_FUZZING_BUILD=${{ env.SNAPPY_FUZZING_BUILD }} + -DSNAPPY_REQUIRE_AVX=${{ env.SNAPPY_REQUIRE_AVX }} + -DSNAPPY_REQUIRE_AVX2=${{ env.SNAPPY_REQUIRE_AVX2 }} + + - name: Build + run: >- + cmake --build "${{ env.CMAKE_BUILD_DIR }}" + --config "${{ env.CMAKE_BUILD_TYPE }}" + + - name: Run C++ API Tests + run: ${{ env.BINARY_PATH }}snappy_unittest${{ env.BINARY_SUFFIX }} + + - name: Run Compression Fuzzer + if: ${{ env.SNAPPY_FUZZING_BUILD == '1' }} + run: >- + ${{ env.BINARY_PATH }}snappy_compress_fuzzer${{ env.BINARY_SUFFIX }} + -runs=1000 -close_fd_mask=3 + + - name: Run Decompression Fuzzer + if: ${{ env.SNAPPY_FUZZING_BUILD == '1' }} + run: >- + ${{ env.BINARY_PATH }}snappy_uncompress_fuzzer${{ env.BINARY_SUFFIX }} + -runs=1000 -close_fd_mask=3 + + - name: Run Benchmarks + run: ${{ env.BINARY_PATH }}snappy_benchmark${{ env.BINARY_SUFFIX }} + + - name: Test CMake installation + run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install diff -Nru snappy-1.1.9/NEWS 
snappy-1.1.10/NEWS --- snappy-1.1.9/NEWS 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/NEWS 2023-03-08 23:44:00.000000000 +0000 @@ -1,3 +1,9 @@ +Snappy v1.1.10, Mar 8th 2023: + + * Performance improvements + + * Compilation fixes for various environments + Snappy v1.1.9, May 4th 2021: * Performance improvements. diff -Nru snappy-1.1.9/README.md snappy-1.1.10/README.md --- snappy-1.1.9/README.md 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/README.md 2023-03-08 23:44:00.000000000 +0000 @@ -1,7 +1,6 @@ Snappy, a fast compressor/decompressor. -[![Build Status](https://travis-ci.org/google/snappy.svg?branch=master)](https://travis-ci.org/google/snappy) -[![Build status](https://ci.appveyor.com/api/projects/status/t9nubcqkwo8rw8yn/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb) +[![Build Status](https://github.com/google/snappy/actions/workflows/build.yml/badge.svg)](https://github.com/google/snappy/actions/workflows/build.yml) Introduction ============ @@ -132,6 +131,32 @@ baddata[1-3].snappy are not intended as benchmarks; they are used to verify correctness in the presence of corrupted data in the unit test.) +Contributing to the Snappy Project +================================== + +In addition to the aims listed at the top of the [README](README.md) Snappy +explicitly supports the following: + +1. C++11 +2. Clang (gcc and MSVC are best-effort). +3. Low level optimizations (e.g. assembly or equivalent intrinsics) for: + 1. [x86](https://en.wikipedia.org/wiki/X86) + 2. [x86-64](https://en.wikipedia.org/wiki/X86-64) + 3. ARMv7 (32-bit) + 4. ARMv8 (AArch64) +4. Supports only the Snappy compression scheme as described in + [format_description.txt](format_description.txt). +5. CMake for building + +Changes adding features or dependencies outside of the core area of focus listed +above might not be accepted. If in doubt post a message to the +[Snappy discussion mailing list](https://groups.google.com/g/snappy-compression). + +We are unlikely to accept contributions to the build configuration files, such +as `CMakeLists.txt`. We are focused on maintaining a build configuration that +allows us to test that the project works in a few supported configurations +inside Google. We are not currently interested in supporting other requirements, +such as different operating systems, compilers, or build systems. Contact ======= diff -Nru snappy-1.1.9/snappy_benchmark.cc snappy-1.1.10/snappy_benchmark.cc --- snappy-1.1.9/snappy_benchmark.cc 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/snappy_benchmark.cc 2023-03-08 23:44:00.000000000 +0000 @@ -149,7 +149,55 @@ } BENCHMARK(BM_UValidateMedley); -void BM_UIOVec(benchmark::State& state) { +void BM_UIOVecSource(benchmark::State& state) { + // Pick file to process based on state.range(0). + int file_index = state.range(0); + + CHECK_GE(file_index, 0); + CHECK_LT(file_index, ARRAYSIZE(kTestDataFiles)); + std::string contents = + ReadTestDataFile(kTestDataFiles[file_index].filename, + kTestDataFiles[file_index].size_limit); + + // Create `iovec`s of the `contents`. 
+ const int kNumEntries = 10; + struct iovec iov[kNumEntries]; + size_t used_so_far = 0; + for (int i = 0; i < kNumEntries; ++i) { + iov[i].iov_base = const_cast(contents.data()) + used_so_far; + if (used_so_far == contents.size()) { + iov[i].iov_len = 0; + continue; + } + if (i == kNumEntries - 1) { + iov[i].iov_len = contents.size() - used_so_far; + } else { + iov[i].iov_len = contents.size() / kNumEntries; + } + used_so_far += iov[i].iov_len; + } + + char* dst = new char[snappy::MaxCompressedLength(contents.size())]; + size_t zsize = 0; + for (auto s : state) { + snappy::RawCompressFromIOVec(iov, contents.size(), dst, &zsize); + benchmark::DoNotOptimize(iov); + } + state.SetBytesProcessed(static_cast(state.iterations()) * + static_cast(contents.size())); + const double compression_ratio = + static_cast(zsize) / std::max(1, contents.size()); + state.SetLabel(StrFormat("%s (%.2f %%)", kTestDataFiles[file_index].label, + 100.0 * compression_ratio)); + VLOG(0) << StrFormat("compression for %s: %d -> %d bytes", + kTestDataFiles[file_index].label, contents.size(), + zsize); + + delete[] dst; +} +BENCHMARK(BM_UIOVecSource)->DenseRange(0, ARRAYSIZE(kTestDataFiles) - 1); + +void BM_UIOVecSink(benchmark::State& state) { // Pick file to process based on state.range(0). int file_index = state.range(0); @@ -193,7 +241,7 @@ delete[] dst; } -BENCHMARK(BM_UIOVec)->DenseRange(0, 4); +BENCHMARK(BM_UIOVecSink)->DenseRange(0, 4); void BM_UFlatSink(benchmark::State& state) { // Pick file to process based on state.range(0). diff -Nru snappy-1.1.9/snappy.cc snappy-1.1.10/snappy.cc --- snappy-1.1.9/snappy.cc 2021-05-04 22:53:34.000000000 +0000 +++ snappy-1.1.10/snappy.cc 2023-03-08 23:44:00.000000000 +0000 @@ -29,18 +29,6 @@ #include "snappy-internal.h" #include "snappy-sinksource.h" #include "snappy.h" - -#if !defined(SNAPPY_HAVE_SSSE3) -// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD -// support between SSE2 and AVX (so SSSE3 instructions require AVX support), and -// defines __AVX__ when AVX support is available. -#if defined(__SSSE3__) || defined(__AVX__) -#define SNAPPY_HAVE_SSSE3 1 -#else -#define SNAPPY_HAVE_SSSE3 0 -#endif -#endif // !defined(SNAPPY_HAVE_SSSE3) - #if !defined(SNAPPY_HAVE_BMI2) // __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2 // specifically, but it does define __AVX2__ when AVX2 support is available. @@ -56,16 +44,34 @@ #endif #endif // !defined(SNAPPY_HAVE_BMI2) -#if SNAPPY_HAVE_SSSE3 -// Please do not replace with . or with headers that assume more -// advanced SSE versions without checking with all the OWNERS. -#include +#if !defined(SNAPPY_HAVE_X86_CRC32) +#if defined(__SSE4_2__) +#define SNAPPY_HAVE_X86_CRC32 1 +#else +#define SNAPPY_HAVE_X86_CRC32 0 #endif +#endif // !defined(SNAPPY_HAVE_X86_CRC32) -#if SNAPPY_HAVE_BMI2 +#if !defined(SNAPPY_HAVE_NEON_CRC32) +#if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32) +#define SNAPPY_HAVE_NEON_CRC32 1 +#else +#define SNAPPY_HAVE_NEON_CRC32 0 +#endif +#endif // !defined(SNAPPY_HAVE_NEON_CRC32) + +#if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32 // Please do not replace with . or with headers that assume more // advanced SSE versions without checking with all the OWNERS. 
#include +#elif SNAPPY_HAVE_NEON_CRC32 +#include +#endif + +#if defined(__GNUC__) +#define SNAPPY_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 3) +#else +#define SNAPPY_PREFETCH(ptr) (void)(ptr) #endif #include @@ -91,6 +97,14 @@ using internal::COPY_4_BYTE_OFFSET; using internal::kMaximumTagLength; using internal::LITERAL; +#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE +using internal::V128; +using internal::V128_Load; +using internal::V128_LoadU; +using internal::V128_Shuffle; +using internal::V128_StoreU; +using internal::V128_DupChar; +#endif // We translate the information encoded in a tag through a lookup table to a // format that requires fewer instructions to decode. Effectively we store @@ -133,21 +147,37 @@ return std::array{LengthMinusOffset(seq)...}; } -// We maximally co-locate the two tables so that only one register needs to be -// reserved for the table address. -struct { - alignas(64) const std::array length_minus_offset; - uint32_t extract_masks[4]; // Used for extracting offset based on tag type. -} table = {MakeTable(make_index_sequence<256>{}), {0, 0xFF, 0xFFFF, 0}}; - -// Any hash function will produce a valid compressed bitstream, but a good -// hash function reduces the number of collisions and thus yields better -// compression for compressible input, and more speed for incompressible -// input. Of course, it doesn't hurt if the hash function is reasonably fast -// either, as it gets called a lot. -inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) { +alignas(64) const std::array kLengthMinusOffset = + MakeTable(make_index_sequence<256>{}); + +// Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to the +// relevant entry, if any, for the given bytes. Any hash function will do, +// but a good hash function reduces the number of collisions and thus yields +// better compression for compressible input. +// +// REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two. +inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) { + // Our choice is quicker-and-dirtier than the typical hash function; + // empirically, that seems beneficial. The upper bits of kMagic * bytes are a + // higher-quality hash than the lower bits, so when using kMagic * bytes we + // also shift right to get a higher-quality end result. There's no similar + // issue with a CRC because all of the output bits of a CRC are equally good + // "hashes." So, a CPU instruction for CRC, if available, tends to be a good + // choice. +#if SNAPPY_HAVE_NEON_CRC32 + // We use mask as the second arg to the CRC function, as it's about to + // be used anyway; it'd be equally correct to use 0 or some constant. + // Mathematically, _mm_crc32_u32 (or similar) is a function of the + // xor of its arguments. + const uint32_t hash = __crc32cw(bytes, mask); +#elif SNAPPY_HAVE_X86_CRC32 + const uint32_t hash = _mm_crc32_u32(bytes, mask); +#else constexpr uint32_t kMagic = 0x1e35a7bd; - return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask; + const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits); +#endif + return reinterpret_cast(reinterpret_cast(table) + + (hash & mask)); } } // namespace @@ -228,7 +258,7 @@ return op_limit; } -#if SNAPPY_HAVE_SSSE3 +#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE // Computes the bytes for shuffle control mask (please read comments on // 'pattern_generation_masks' as well) for the given index_offset and @@ -248,19 +278,19 @@ // Computes the shuffle control mask bytes array for given pattern-sizes and // returns an array. 
template -inline constexpr std::array, +inline constexpr std::array, sizeof...(pattern_sizes_minus_one)> MakePatternMaskBytesTable(int index_offset, index_sequence) { - return {MakePatternMaskBytes( - index_offset, pattern_sizes_minus_one + 1, - make_index_sequence())...}; + return { + MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1, + make_index_sequence())...}; } // This is an array of shuffle control masks that can be used as the source // operand for PSHUFB to permute the contents of the destination XMM register // into a repeating byte pattern. -alignas(16) constexpr std::array, +alignas(16) constexpr std::array, 16> pattern_generation_masks = MakePatternMaskBytesTable( /*index_offset=*/0, @@ -271,40 +301,40 @@ // Basically, pattern_reshuffle_masks is a continuation of // pattern_generation_masks. It follows that, pattern_reshuffle_masks is same as // pattern_generation_masks for offsets 1, 2, 4, 8 and 16. -alignas(16) constexpr std::array, +alignas(16) constexpr std::array, 16> pattern_reshuffle_masks = MakePatternMaskBytesTable( /*index_offset=*/16, /*pattern_sizes_minus_one=*/make_index_sequence<16>()); SNAPPY_ATTRIBUTE_ALWAYS_INLINE -static inline __m128i LoadPattern(const char* src, const size_t pattern_size) { - __m128i generation_mask = _mm_load_si128(reinterpret_cast( +static inline V128 LoadPattern(const char* src, const size_t pattern_size) { + V128 generation_mask = V128_Load(reinterpret_cast( pattern_generation_masks[pattern_size - 1].data())); // Uninitialized bytes are masked out by the shuffle mask. // TODO: remove annotation and macro defs once MSan is fixed. SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size); - return _mm_shuffle_epi8( - _mm_loadu_si128(reinterpret_cast(src)), generation_mask); + return V128_Shuffle(V128_LoadU(reinterpret_cast(src)), + generation_mask); } SNAPPY_ATTRIBUTE_ALWAYS_INLINE -static inline std::pair<__m128i /* pattern */, __m128i /* reshuffle_mask */> +static inline std::pair LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) { - __m128i pattern = LoadPattern(src, pattern_size); + V128 pattern = LoadPattern(src, pattern_size); // This mask will generate the next 16 bytes in-place. Doing so enables us to - // write data by at most 4 _mm_storeu_si128. + // write data by at most 4 V128_StoreU. // // For example, suppose pattern is: abcdefabcdefabcd // Shuffling with this mask will generate: efabcdefabcdefab // Shuffling again will generate: cdefabcdefabcdef - __m128i reshuffle_mask = _mm_load_si128(reinterpret_cast( + V128 reshuffle_mask = V128_Load(reinterpret_cast( pattern_reshuffle_masks[pattern_size - 1].data())); return {pattern, reshuffle_mask}; } -#endif // SNAPPY_HAVE_SSSE3 +#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE // Fallback for when we need to copy while extending the pattern, for example // copying 10 bytes from 3 positions back abc -> abcabcabcabca. @@ -312,33 +342,38 @@ // REQUIRES: [dst - offset, dst + 64) is a valid address range. SNAPPY_ATTRIBUTE_ALWAYS_INLINE static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) { -#if SNAPPY_HAVE_SSSE3 +#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE if (SNAPPY_PREDICT_TRUE(offset <= 16)) { switch (offset) { case 0: return false; case 1: { - std::memset(dst, dst[-1], 64); + // TODO: Ideally we should memset, move back once the + // codegen issues are fixed. 
+ V128 pattern = V128_DupChar(dst[-1]); + for (int i = 0; i < 4; i++) { + V128_StoreU(reinterpret_cast(dst + 16 * i), pattern); + } return true; } case 2: case 4: case 8: case 16: { - __m128i pattern = LoadPattern(dst - offset, offset); + V128 pattern = LoadPattern(dst - offset, offset); for (int i = 0; i < 4; i++) { - _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern); + V128_StoreU(reinterpret_cast(dst + 16 * i), pattern); } return true; } default: { auto pattern_and_reshuffle_mask = LoadPatternAndReshuffleMask(dst - offset, offset); - __m128i pattern = pattern_and_reshuffle_mask.first; - __m128i reshuffle_mask = pattern_and_reshuffle_mask.second; + V128 pattern = pattern_and_reshuffle_mask.first; + V128 reshuffle_mask = pattern_and_reshuffle_mask.second; for (int i = 0; i < 4; i++) { - _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern); - pattern = _mm_shuffle_epi8(pattern, reshuffle_mask); + V128_StoreU(reinterpret_cast(dst + 16 * i), pattern); + pattern = V128_Shuffle(pattern, reshuffle_mask); } return true; } @@ -348,7 +383,8 @@ if (SNAPPY_PREDICT_TRUE(offset < 16)) { if (SNAPPY_PREDICT_FALSE(offset == 0)) return false; // Extend the pattern to the first 16 bytes. - for (int i = 0; i < 16; i++) dst[i] = dst[i - offset]; + // The simpler formulation of `dst[i - offset]` induces undefined behavior. + for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i]; // Find a multiple of pattern >= 16. static std::array pattern_sizes = []() { std::array res; @@ -361,7 +397,7 @@ } return true; } -#endif // SNAPPY_HAVE_SSSE3 +#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE // Very rare. for (int i = 0; i < 4; i++) { @@ -375,7 +411,7 @@ // region of the buffer. inline char* IncrementalCopy(const char* src, char* op, char* const op_limit, char* const buf_limit) { -#if SNAPPY_HAVE_SSSE3 +#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE constexpr int big_pattern_size_lower_bound = 16; #else constexpr int big_pattern_size_lower_bound = 8; @@ -425,14 +461,14 @@ // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE) // bytes. if (pattern_size < big_pattern_size_lower_bound) { -#if SNAPPY_HAVE_SSSE3 +#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB // to permute the register's contents in-place into a repeating sequence of // the first "pattern_size" bytes. // For example, suppose: // src == "abc" // op == op + 3 - // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc" + // After V128_Shuffle(), "pattern" will have five copies of "abc" // followed by one byte of slop: abcabcabcabcabca. // // The non-SSE fallback implementation suffers from store-forwarding stalls @@ -444,26 +480,26 @@ if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) { auto pattern_and_reshuffle_mask = LoadPatternAndReshuffleMask(src, pattern_size); - __m128i pattern = pattern_and_reshuffle_mask.first; - __m128i reshuffle_mask = pattern_and_reshuffle_mask.second; + V128 pattern = pattern_and_reshuffle_mask.first; + V128 reshuffle_mask = pattern_and_reshuffle_mask.second; // There is at least one, and at most four 16-byte blocks. Writing four // conditionals instead of a loop allows FDO to layout the code with // respect to the actual probabilities of each length. // TODO: Replace with loop with trip count hint. 
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern); + V128_StoreU(reinterpret_cast(op), pattern); if (op + 16 < op_limit) { - pattern = _mm_shuffle_epi8(pattern, reshuffle_mask); - _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 16), pattern); + pattern = V128_Shuffle(pattern, reshuffle_mask); + V128_StoreU(reinterpret_cast(op + 16), pattern); } if (op + 32 < op_limit) { - pattern = _mm_shuffle_epi8(pattern, reshuffle_mask); - _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 32), pattern); + pattern = V128_Shuffle(pattern, reshuffle_mask); + V128_StoreU(reinterpret_cast(op + 32), pattern); } if (op + 48 < op_limit) { - pattern = _mm_shuffle_epi8(pattern, reshuffle_mask); - _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 48), pattern); + pattern = V128_Shuffle(pattern, reshuffle_mask); + V128_StoreU(reinterpret_cast(op + 48), pattern); } return op_limit; } @@ -471,8 +507,8 @@ if (SNAPPY_PREDICT_TRUE(op < op_end)) { auto pattern_and_reshuffle_mask = LoadPatternAndReshuffleMask(src, pattern_size); - __m128i pattern = pattern_and_reshuffle_mask.first; - __m128i reshuffle_mask = pattern_and_reshuffle_mask.second; + V128 pattern = pattern_and_reshuffle_mask.first; + V128 reshuffle_mask = pattern_and_reshuffle_mask.second; // This code path is relatively cold however so we save code size // by avoiding unrolling and vectorizing. @@ -483,13 +519,13 @@ #pragma clang loop unroll(disable) #endif do { - _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern); - pattern = _mm_shuffle_epi8(pattern, reshuffle_mask); + V128_StoreU(reinterpret_cast(op), pattern); + pattern = V128_Shuffle(pattern, reshuffle_mask); op += 16; } while (SNAPPY_PREDICT_TRUE(op < op_end)); } return IncrementalCopySlow(op - pattern_size, op, op_limit); -#else // !SNAPPY_HAVE_SSSE3 +#else // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE // If plenty of buffer space remains, expand the pattern to at least 8 // bytes. The way the following loop is written, we need 8 bytes of buffer // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10 @@ -506,7 +542,7 @@ } else { return IncrementalCopySlow(src, op, op_limit); } -#endif // SNAPPY_HAVE_SSSE3 +#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE } assert(pattern_size >= big_pattern_size_lower_bound); constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16; @@ -599,7 +635,19 @@ LittleEndian::Store32(op, n); op += count; } - std::memcpy(op, literal, len); + // When allow_fast_path is true, we can overwrite up to 16 bytes. + if (allow_fast_path) { + char* destination = op; + const char* source = literal; + const char* end = destination + len; + do { + std::memcpy(destination, source, 16); + destination += 16; + source += 16; + } while (destination < end); + } else { + std::memcpy(op, literal, len); + } return op + len; } @@ -734,7 +782,7 @@ const char* ip = input; assert(input_size <= kBlockSize); assert((table_size & (table_size - 1)) == 0); // table must be power of two - const uint32_t mask = table_size - 1; + const uint32_t mask = 2 * (table_size - 1); const char* ip_end = input + input_size; const char* base_ip = ip; @@ -785,11 +833,11 @@ // loaded in preload. uint32_t dword = i == 0 ? 
preload : static_cast(data); assert(dword == LittleEndian::Load32(ip + i)); - uint32_t hash = HashBytes(dword, mask); - candidate = base_ip + table[hash]; + uint16_t* table_entry = TableEntry(table, dword, mask); + candidate = base_ip + *table_entry; assert(candidate >= base_ip); assert(candidate < ip + i); - table[hash] = delta + i; + *table_entry = delta + i; if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) { *op = LITERAL | (i << 2); UnalignedCopy128(next_emit, op + 1); @@ -806,7 +854,7 @@ } while (true) { assert(static_cast(data) == LittleEndian::Load32(ip)); - uint32_t hash = HashBytes(data, mask); + uint16_t* table_entry = TableEntry(table, data, mask); uint32_t bytes_between_hash_lookups = skip >> 5; skip += bytes_between_hash_lookups; const char* next_ip = ip + bytes_between_hash_lookups; @@ -814,11 +862,11 @@ ip = next_emit; goto emit_remainder; } - candidate = base_ip + table[hash]; + candidate = base_ip + *table_entry; assert(candidate >= base_ip); assert(candidate < ip); - table[hash] = ip - base_ip; + *table_entry = ip - base_ip; if (SNAPPY_PREDICT_FALSE(static_cast(data) == LittleEndian::Load32(candidate))) { break; @@ -864,12 +912,13 @@ assert((data & 0xFFFFFFFFFF) == (LittleEndian::Load64(ip) & 0xFFFFFFFFFF)); // We are now looking for a 4-byte match again. We read - // table[Hash(ip, shift)] for that. To improve compression, + // table[Hash(ip, mask)] for that. To improve compression, // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)]. - table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1; - uint32_t hash = HashBytes(data, mask); - candidate = base_ip + table[hash]; - table[hash] = ip - base_ip; + *TableEntry(table, LittleEndian::Load32(ip - 1), mask) = + ip - base_ip - 1; + uint16_t* table_entry = TableEntry(table, data, mask); + candidate = base_ip + *table_entry; + *table_entry = ip - base_ip; // Measurements on the benchmarks have shown the following probabilities // for the loop to exit (ie. avg. number of iterations is reciprocal). // BM_Flat/6 txt1 p = 0.3-0.4 @@ -962,7 +1011,7 @@ // bool TryFastAppend(const char* ip, size_t available, size_t length, T* op); // }; -static inline uint32_t ExtractLowBytes(uint32_t v, int n) { +static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) { assert(n >= 0); assert(n <= 4); #if SNAPPY_HAVE_BMI2 @@ -991,30 +1040,87 @@ return offset != 0; } -void MemCopy(char* dst, const uint8_t* src, size_t size) { - std::memcpy(dst, src, size); +// Copies between size bytes and 64 bytes from src to dest. size cannot exceed +// 64. More than size bytes, but never exceeding 64, might be copied if doing +// so gives better performance. [src, src + size) must not overlap with +// [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64). +void MemCopy64(char* dst, const void* src, size_t size) { + // Always copy this many bytes. If that's below size then copy the full 64. + constexpr int kShortMemCopy = 32; + + assert(size <= 64); + assert(std::less_equal()(static_cast(src) + size, + dst) || + std::less_equal()(dst + size, src)); + + // We know that src and dst are at least size bytes apart. However, because we + // might copy more than size bytes the copy still might overlap past size. + // E.g. if src and dst appear consecutively in memory (src + size >= dst). + // TODO: Investigate wider copies on other platforms. 
+#if defined(__x86_64__) && defined(__AVX__) + assert(kShortMemCopy <= 32); + __m256i data = _mm256_lddqu_si256(static_cast(src)); + _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data); + // Profiling shows that nearly all copies are short. + if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) { + data = _mm256_lddqu_si256(static_cast(src) + 1); + _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data); + } +#else + std::memmove(dst, src, kShortMemCopy); + // Profiling shows that nearly all copies are short. + if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) { + std::memmove(dst + kShortMemCopy, + static_cast(src) + kShortMemCopy, + 64 - kShortMemCopy); + } +#endif } -void MemCopy(ptrdiff_t dst, const uint8_t* src, size_t size) { +void MemCopy64(ptrdiff_t dst, const void* src, size_t size) { // TODO: Switch to [[maybe_unused]] when we can assume C++17. (void)dst; (void)src; (void)size; } -void MemMove(char* dst, const void* src, size_t size) { - std::memmove(dst, src, size); +void ClearDeferred(const void** deferred_src, size_t* deferred_length, + uint8_t* safe_source) { + *deferred_src = safe_source; + *deferred_length = 0; } -void MemMove(ptrdiff_t dst, const void* src, size_t size) { - // TODO: Switch to [[maybe_unused]] when we can assume C++17. - (void)dst; - (void)src; - (void)size; +void DeferMemCopy(const void** deferred_src, size_t* deferred_length, + const void* src, size_t length) { + *deferred_src = src; + *deferred_length = length; } SNAPPY_ATTRIBUTE_ALWAYS_INLINE -size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) { +inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) { + const uint8_t*& ip = *ip_p; + // This section is crucial for the throughput of the decompression loop. + // The latency of an iteration is fundamentally constrained by the + // following data chain on ip. + // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2 + // delta2 = ((c >> 2) + 1) ip++ + // This is different from X86 optimizations because ARM has conditional add + // instruction (csinc) and it removes several register moves. + const size_t tag_type = *tag & 3; + const bool is_literal = (tag_type == 0); + if (is_literal) { + size_t next_literal_tag = (*tag >> 2) + 1; + *tag = ip[next_literal_tag]; + ip += next_literal_tag + 1; + } else { + *tag = ip[tag_type]; + ip += tag_type + 1; + } + return tag_type; +} + +SNAPPY_ATTRIBUTE_ALWAYS_INLINE +inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) { const uint8_t*& ip = *ip_p; // This section is crucial for the throughput of the decompression loop. // The latency of an iteration is fundamentally constrained by the @@ -1026,11 +1132,12 @@ size_t literal_len = *tag >> 2; size_t tag_type = *tag; bool is_literal; -#if defined(__GNUC__) && defined(__x86_64__) +#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) // TODO clang misses the fact that the (c & 3) already correctly // sets the zero flag. asm("and $3, %k[tag_type]\n\t" - : [tag_type] "+r"(tag_type), "=@ccz"(is_literal)); + : [tag_type] "+r"(tag_type), "=@ccz"(is_literal) + :: "cc"); #else tag_type &= 3; is_literal = (tag_type == 0); @@ -1060,7 +1167,24 @@ // Extract the offset for copy-1 and copy-2 returns 0 for literals or copy-4. inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) { - return val & table.extract_masks[tag_type]; + // For x86 non-static storage works better. For ARM static storage is better. + // TODO: Once the array is recognized as a register, improve the + // readability for x86. 
+#if defined(__x86_64__) + constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull; + uint16_t result; + memcpy(&result, + reinterpret_cast(&kExtractMasksCombined) + 2 * tag_type, + sizeof(result)); + return val & result; +#elif defined(__aarch64__) + constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull; + return val & static_cast( + (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF); +#else + static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0}; + return val & kExtractMasks[tag_type]; +#endif }; // Core decompression loop, when there is enough data available. @@ -1076,6 +1200,12 @@ std::pair DecompressBranchless( const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base, ptrdiff_t op_limit_min_slop) { + // If deferred_src is invalid point it here. + uint8_t safe_source[64]; + const void* deferred_src; + size_t deferred_length; + ClearDeferred(&deferred_src, &deferred_length, safe_source); + // We unroll the inner loop twice so we need twice the spare room. op_limit_min_slop -= kSlopBytes; if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) { @@ -1084,17 +1214,32 @@ // ip points just past the tag and we are touching at maximum kSlopBytes // in an iteration. size_t tag = ip[-1]; +#if defined(__clang__) && defined(__aarch64__) + // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317 + // when loading 1 byte, clang for aarch64 doesn't realize that it(ldrb) + // comes with free zero-extension, so clang generates another + // 'and xn, xm, 0xff' before it use that as the offset. This 'and' is + // redundant and can be removed by adding this dummy asm, which gives + // clang a hint that we're doing the zero-extension at the load. + asm("" ::"r"(tag)); +#endif do { // The throughput is limited by instructions, unrolling the inner loop // twice reduces the amount of instructions checking limits and also // leads to reduced mov's. + + SNAPPY_PREFETCH(ip + 128); for (int i = 0; i < 2; i++) { const uint8_t* old_ip = ip; assert(tag == ip[-1]); // For literals tag_type = 0, hence we will always obtain 0 from // ExtractLowBytes. For literals offset will thus be kLiteralOffset. - ptrdiff_t len_min_offset = table.length_minus_offset[tag]; - size_t tag_type = AdvanceToNextTag(&ip, &tag); + ptrdiff_t len_min_offset = kLengthMinusOffset[tag]; +#if defined(__aarch64__) + size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag); +#else + size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag); +#endif uint32_t next = LittleEndian::Load32(old_ip); size_t len = len_min_offset & 0xFF; len_min_offset -= ExtractOffset(next, tag_type); @@ -1110,39 +1255,29 @@ } // Only copy-1 or copy-2 tags can get here. assert(tag_type == 1 || tag_type == 2); - std::ptrdiff_t delta = op + len_min_offset - len; + std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len; // Guard against copies before the buffer start. + // Execute any deferred MemCopy since we write to dst here. + MemCopy64(op_base + op, deferred_src, deferred_length); + op += deferred_length; + ClearDeferred(&deferred_src, &deferred_length, safe_source); if (SNAPPY_PREDICT_FALSE(delta < 0 || !Copy64BytesWithPatternExtension( op_base + op, len - len_min_offset))) { goto break_loop; } + // We aren't deferring this copy so add length right away. 
op += len; continue; } - std::ptrdiff_t delta = op + len_min_offset - len; + std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len; if (SNAPPY_PREDICT_FALSE(delta < 0)) { -#if defined(__GNUC__) && defined(__x86_64__) - // TODO - // When validating, both code path reduced to `op += len`. Ie. this - // becomes effectively - // - // if (delta < 0) if (tag_type != 0) goto break_loop; - // op += len; - // - // The compiler interchanges the predictable and almost always false - // first if-statement with the completely unpredictable second - // if-statement, putting an unpredictable branch on every iteration. - // This empty asm is worth almost 2x, which I think qualifies for an - // award for the most load-bearing empty statement. - asm(""); -#endif - // Due to the spurious offset in literals have this will trigger // at the start of a block when op is still smaller than 256. if (tag_type != 0) goto break_loop; - MemCopy(op_base + op, old_ip, 64); - op += len; + MemCopy64(op_base + op, deferred_src, deferred_length); + op += deferred_length; + DeferMemCopy(&deferred_src, &deferred_length, old_ip, len); continue; } @@ -1150,14 +1285,23 @@ // we need to copy from ip instead of from the stream. const void* from = tag_type ? reinterpret_cast(op_base + delta) : old_ip; - MemMove(op_base + op, from, 64); - op += len; + MemCopy64(op_base + op, deferred_src, deferred_length); + op += deferred_length; + DeferMemCopy(&deferred_src, &deferred_length, from, len); } - } while (ip < ip_limit_min_slop && op < op_limit_min_slop); + } while (ip < ip_limit_min_slop && + (op + deferred_length) < op_limit_min_slop); exit: ip--; assert(ip <= ip_limit); } + // If we deferred a copy then we can perform. If we are up to date then we + // might not have enough slop bytes and could run past the end. + if (deferred_length) { + MemCopy64(op_base + op, deferred_src, deferred_length); + op += deferred_length; + ClearDeferred(&deferred_src, &deferred_length, safe_source); + } return {ip, op}; } @@ -1325,7 +1469,7 @@ if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit; } else { - const ptrdiff_t entry = table.length_minus_offset[c]; + const ptrdiff_t entry = kLengthMinusOffset[c]; preload = LittleEndian::Load32(ip); const uint32_t trailer = ExtractLowBytes(preload, c & 3); const uint32_t length = entry & 0xff; @@ -1537,6 +1681,67 @@ // IOVec interfaces // ----------------------------------------------------------------------- +// A `Source` implementation that yields the contents of an `iovec` array. Note +// that `total_size` is the total number of bytes to be read from the elements +// of `iov` (_not_ the total number of elements in `iov`). +class SnappyIOVecReader : public Source { + public: + SnappyIOVecReader(const struct iovec* iov, size_t total_size) + : curr_iov_(iov), + curr_pos_(total_size > 0 ? reinterpret_cast(iov->iov_base) + : nullptr), + curr_size_remaining_(total_size > 0 ? iov->iov_len : 0), + total_size_remaining_(total_size) { + // Skip empty leading `iovec`s. 
+    if (total_size > 0 && curr_size_remaining_ == 0) Advance();
+  }
+
+  ~SnappyIOVecReader() = default;
+
+  size_t Available() const { return total_size_remaining_; }
+
+  const char* Peek(size_t* len) {
+    *len = curr_size_remaining_;
+    return curr_pos_;
+  }
+
+  void Skip(size_t n) {
+    while (n >= curr_size_remaining_ && n > 0) {
+      n -= curr_size_remaining_;
+      Advance();
+    }
+    curr_size_remaining_ -= n;
+    total_size_remaining_ -= n;
+    curr_pos_ += n;
+  }
+
+ private:
+  // Advances to the next nonempty `iovec` and updates related variables.
+  void Advance() {
+    do {
+      assert(total_size_remaining_ >= curr_size_remaining_);
+      total_size_remaining_ -= curr_size_remaining_;
+      if (total_size_remaining_ == 0) {
+        curr_pos_ = nullptr;
+        curr_size_remaining_ = 0;
+        return;
+      }
+      ++curr_iov_;
+      curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base);
+      curr_size_remaining_ = curr_iov_->iov_len;
+    } while (curr_size_remaining_ == 0);
+  }
+
+  // The `iovec` currently being read.
+  const struct iovec* curr_iov_;
+  // The location in `curr_iov_` currently being read.
+  const char* curr_pos_;
+  // The amount of unread data in `curr_iov_`.
+  size_t curr_size_remaining_;
+  // The amount of unread data in the entire input array.
+  size_t total_size_remaining_;
+};
+
 // A type that writes to an iovec.
 // Note that this is not a "ByteSink", but a type that matches the
 // Writer template argument to SnappyDecompressor::DecompressAllTags().
@@ -1911,6 +2116,16 @@
   *compressed_length = (writer.CurrentDestination() - compressed);
 }
 
+void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
+                          char* compressed, size_t* compressed_length) {
+  SnappyIOVecReader reader(iov, uncompressed_length);
+  UncheckedByteArraySink writer(compressed);
+  Compress(&reader, &writer);
+
+  // Compute how many bytes were added.
+  *compressed_length = writer.CurrentDestination() - compressed;
+}
+
 size_t Compress(const char* input, size_t input_length,
                 std::string* compressed) {
   // Pre-grow the buffer to the max length of the compressed output
@@ -1919,7 +2134,26 @@
   size_t compressed_length;
   RawCompress(input, input_length, string_as_array(compressed),
               &compressed_length);
-  compressed->resize(compressed_length);
+  compressed->erase(compressed_length);
+  return compressed_length;
+}
+
+size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
+                         std::string* compressed) {
+  // Compute the number of bytes to be compressed.
+  size_t uncompressed_length = 0;
+  for (size_t i = 0; i < iov_cnt; ++i) {
+    uncompressed_length += iov[i].iov_len;
+  }
+
+  // Pre-grow the buffer to the max length of the compressed output.
+  STLStringResizeUninitialized(compressed, MaxCompressedLength(
+      uncompressed_length));
+
+  size_t compressed_length;
+  RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed),
+                       &compressed_length);
+  compressed->erase(compressed_length);
   return compressed_length;
 }
 
diff -Nru snappy-1.1.9/snappy.h snappy-1.1.10/snappy.h
--- snappy-1.1.9/snappy.h 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy.h 2023-03-08 23:44:00.000000000 +0000
@@ -71,14 +71,21 @@
   // Higher-level string based routines (should be sufficient for most users)
   // ------------------------------------------------------------------------
 
-  // Sets "*compressed" to the compressed version of "input[0,input_length-1]".
+  // Sets "*compressed" to the compressed version of "input[0..input_length-1]".
   // Original contents of *compressed are lost.
   //
   // REQUIRES: "input[]" is not an alias of "*compressed".
   size_t Compress(const char* input, size_t input_length,
                   std::string* compressed);
 
-  // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+  // Same as `Compress` above but taking an `iovec` array as input. Note that
+  // this function preprocesses the inputs to compute the sum of
+  // `iov[0..iov_cnt-1].iov_len` before reading. To avoid this, use
+  // `RawCompressFromIOVec` below.
+  size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
+                           std::string* compressed);
+
+  // Decompresses "compressed[0..compressed_length-1]" to "*uncompressed".
   // Original contents of "*uncompressed" are lost.
   //
   // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
@@ -124,6 +131,12 @@
                    char* compressed, size_t* compressed_length);
 
+  // Same as `RawCompress` above but taking an `iovec` array as input. Note that
+  // `uncompressed_length` is the total number of bytes to be read from the
+  // elements of `iov` (_not_ the number of elements in `iov`).
+  void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
+                            char* compressed, size_t* compressed_length);
+
   // Given data in "compressed[0..compressed_length-1]" generated by
   // calling the Snappy::Compress routine, this routine
   // stores the uncompressed data to
diff -Nru snappy-1.1.9/snappy-internal.h snappy-1.1.10/snappy-internal.h
--- snappy-1.1.9/snappy-internal.h 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy-internal.h 2023-03-08 23:44:00.000000000 +0000
@@ -33,9 +33,84 @@
 #include "snappy-stubs-internal.h"
 
+#if SNAPPY_HAVE_SSSE3
+// Please do not replace with <x86intrin.h> or with headers that assume more
+// advanced SSE versions without checking with all the OWNERS.
+#include <emmintrin.h>
+#include <tmmintrin.h>
+#endif
+
+#if SNAPPY_HAVE_NEON
+#include <arm_neon.h>
+#endif
+
+#if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
+#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
+#else
+#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
+#endif
+
 namespace snappy {
 namespace internal {
 
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+#if SNAPPY_HAVE_SSSE3
+using V128 = __m128i;
+#elif SNAPPY_HAVE_NEON
+using V128 = uint8x16_t;
+#endif
+
+// Load 128 bits of integer data. `src` must be 16-byte aligned.
+inline V128 V128_Load(const V128* src);
+
+// Load 128 bits of integer data. `src` does not need to be aligned.
+inline V128 V128_LoadU(const V128* src);
+
+// Store 128 bits of integer data. `dst` does not need to be aligned.
+inline void V128_StoreU(V128* dst, V128 val);
+
+// Shuffle packed 8-bit integers using a shuffle mask.
+// Each packed integer in the shuffle mask must be in [0,16).
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
+
+// Constructs V128 with 16 chars |c|.
+inline V128 V128_DupChar(char c);
+
+#if SNAPPY_HAVE_SSSE3
+inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
+
+inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
+
+inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
+
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+  return _mm_shuffle_epi8(input, shuffle_mask);
+}
+
+inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
+
+#elif SNAPPY_HAVE_NEON
+inline V128 V128_Load(const V128* src) {
+  return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+}
+
+inline V128 V128_LoadU(const V128* src) {
+  return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+}
+
+inline void V128_StoreU(V128* dst, V128 val) {
+  vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
+}
+
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+  assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
+  return vqtbl1q_u8(input, shuffle_mask);
+}
+
+inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
+#endif
+#endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
 // Working memory performs a single allocation to hold all scratch space
 // required for compression.
 class WorkingMemory {
@@ -95,8 +170,9 @@
 // loading from s2 + n.
 //
 // Separate implementation for 64-bit, little-endian cpus.
-#if !defined(SNAPPY_IS_BIG_ENDIAN) && \
-    (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || defined(ARCH_ARM))
+#if !SNAPPY_IS_BIG_ENDIAN && \
+    (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
+     defined(ARCH_ARM))
 static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                       const char* s2,
                                                       const char* s2_limit,
@@ -154,8 +230,9 @@
       uint64_t xorval = a1 ^ a2;
       int shift = Bits::FindLSBSetNonZero64(xorval);
       size_t matched_bytes = shift >> 3;
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
 #ifndef __x86_64__
-      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 #else
       // Ideally this would just be
       //
@@ -166,13 +243,13 @@
       // use a conditional move (it's tuned to cut data dependencies). In this
       // case there is a longer parallel chain anyway AND this will be fairly
       // unpredictable.
-      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
       asm("testl %k2, %k2\n\t"
           "cmovzq %1, %0\n\t"
          : "+r"(a2)
-          : "r"(a3), "r"(xorval));
-      *data = a2 >> (shift & (3 * 8));
+          : "r"(a3), "r"(xorval)
+          : "cc");
 #endif
+      *data = a2 >> (shift & (3 * 8));
       return std::pair<size_t, bool>(matched_bytes, true);
     } else {
       matched = 8;
@@ -194,16 +271,17 @@
     uint64_t xorval = a1 ^ a2;
     int shift = Bits::FindLSBSetNonZero64(xorval);
     size_t matched_bytes = shift >> 3;
+    uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
 #ifndef __x86_64__
-    *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+    a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 #else
-    uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
     asm("testl %k2, %k2\n\t"
         "cmovzq %1, %0\n\t"
         : "+r"(a2)
-        : "r"(a3), "r"(xorval));
-    *data = a2 >> (shift & (3 * 8));
+        : "r"(a3), "r"(xorval)
+        : "cc");
 #endif
+    *data = a2 >> (shift & (3 * 8));
     matched += matched_bytes;
     assert(matched >= 8);
     return std::pair<size_t, bool>(matched, false);
diff -Nru snappy-1.1.9/snappy-stubs-internal.h snappy-1.1.10/snappy-stubs-internal.h
--- snappy-1.1.9/snappy-stubs-internal.h 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy-stubs-internal.h 2023-03-08 23:44:00.000000000 +0000
@@ -31,7 +31,7 @@
 #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 
-#ifdef HAVE_CONFIG_H
+#if HAVE_CONFIG_H
 #include "config.h"
 #endif
@@ -43,11 +43,11 @@
 #include
 #include
 
-#ifdef HAVE_SYS_MMAN_H
+#if HAVE_SYS_MMAN_H
 #include <sys/mman.h>
 #endif
 
-#ifdef HAVE_UNISTD_H
+#if HAVE_UNISTD_H
 #include <unistd.h>
 #endif
@@ -90,20 +90,20 @@
 #define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
 
 // Static prediction hints.
-#ifdef HAVE_BUILTIN_EXPECT
+#if HAVE_BUILTIN_EXPECT
 #define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
 #define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
 #else
 #define SNAPPY_PREDICT_FALSE(x) x
 #define SNAPPY_PREDICT_TRUE(x) x
-#endif
+#endif  // HAVE_BUILTIN_EXPECT
 
 // Inlining hints.
-#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
+#if HAVE_ATTRIBUTE_ALWAYS_INLINE
 #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
 #else
 #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
-#endif
+#endif  // HAVE_ATTRIBUTE_ALWAYS_INLINE
 
 // Stubbed version of ABSL_FLAG.
 //
@@ -171,27 +171,42 @@
  public:
   // Functions to do unaligned loads and stores in little-endian order.
   static inline uint16_t Load16(const void *ptr) {
-    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
     return (static_cast<uint16_t>(buffer[0])) |
            (static_cast<uint16_t>(buffer[1]) << 8);
+#else
+    // memcpy() turns into a single instruction early in the optimization
+    // pipeline (relatively to a series of byte accesses). So, using memcpy
+    // instead of byte accesses may lead to better decisions in more stages of
+    // the optimization pipeline.
+    uint16_t value;
+    std::memcpy(&value, ptr, 2);
+    return value;
+#endif
   }
 
   static inline uint32_t Load32(const void *ptr) {
-    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
     return (static_cast<uint32_t>(buffer[0])) |
            (static_cast<uint32_t>(buffer[1]) << 8) |
           (static_cast<uint32_t>(buffer[2]) << 16) |
           (static_cast<uint32_t>(buffer[3]) << 24);
+#else
+    // See Load16() for the rationale of using memcpy().
+    uint32_t value;
+    std::memcpy(&value, ptr, 4);
+    return value;
+#endif
   }
 
   static inline uint64_t Load64(const void *ptr) {
-    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
     return (static_cast<uint64_t>(buffer[0])) |
            (static_cast<uint64_t>(buffer[1]) << 8) |
           (static_cast<uint64_t>(buffer[2]) << 16) |
@@ -200,30 +215,44 @@
           (static_cast<uint64_t>(buffer[5]) << 40) |
           (static_cast<uint64_t>(buffer[6]) << 48) |
           (static_cast<uint64_t>(buffer[7]) << 56);
+#else
+    // See Load16() for the rationale of using memcpy().
+    uint64_t value;
+    std::memcpy(&value, ptr, 8);
+    return value;
+#endif
   }
 
   static inline void Store16(void *dst, uint16_t value) {
-    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
-    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
     buffer[0] = static_cast<uint8_t>(value);
     buffer[1] = static_cast<uint8_t>(value >> 8);
+#else
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 2);
+#endif
   }
 
   static void Store32(void *dst, uint32_t value) {
-    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
-    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
     buffer[0] = static_cast<uint8_t>(value);
     buffer[1] = static_cast<uint8_t>(value >> 8);
     buffer[2] = static_cast<uint8_t>(value >> 16);
     buffer[3] = static_cast<uint8_t>(value >> 24);
+#else
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 4);
+#endif
   }
 
   static void Store64(void* dst, uint64_t value) {
-    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
-    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
     buffer[0] = static_cast<uint8_t>(value);
     buffer[1] = static_cast<uint8_t>(value >> 8);
     buffer[2] = static_cast<uint8_t>(value >> 16);
@@ -232,14 +261,18 @@
     buffer[5] = static_cast<uint8_t>(value >> 40);
     buffer[6] = static_cast<uint8_t>(value >> 48);
     buffer[7] = static_cast<uint8_t>(value >> 56);
+#else
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 8);
+#endif
   }
 
   static inline constexpr bool IsLittleEndian() {
-#if defined(SNAPPY_IS_BIG_ENDIAN)
+#if SNAPPY_IS_BIG_ENDIAN
     return false;
 #else
     return true;
-#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
+#endif  // SNAPPY_IS_BIG_ENDIAN
   }
 };
 
@@ -265,7 +298,7 @@
   void operator=(const Bits&);
 };
 
-#if defined(HAVE_BUILTIN_CTZ)
+#if HAVE_BUILTIN_CTZ
 
 inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
@@ -354,7 +387,7 @@
 
 #endif  // End portable versions.
 
-#if defined(HAVE_BUILTIN_CTZ)
+#if HAVE_BUILTIN_CTZ
 
 inline int Bits::FindLSBSetNonZero64(uint64_t n) {
   assert(n != 0);
@@ -388,7 +421,7 @@
   }
 }
 
-#endif  // End portable version.
+#endif  // HAVE_BUILTIN_CTZ
 
 // Variable-length integer encoding.
 class Varint {
diff -Nru snappy-1.1.9/snappy-test.cc snappy-1.1.10/snappy-test.cc
--- snappy-1.1.9/snappy-test.cc 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy-test.cc 2023-03-08 23:44:00.000000000 +0000
@@ -151,7 +151,7 @@
 #pragma warning(pop)
 #endif
 
-#ifdef HAVE_LIBZ
+#if HAVE_LIBZ
 
 ZLib::ZLib()
     : comp_init_(false),
diff -Nru snappy-1.1.9/snappy-test.h snappy-1.1.10/snappy-test.h
--- snappy-1.1.9/snappy-test.h 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy-test.h 2023-03-08 23:44:00.000000000 +0000
@@ -31,25 +31,25 @@
 #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
 #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
 
-#ifdef HAVE_CONFIG_H
+#if HAVE_CONFIG_H
 #include "config.h"
 #endif
 
 #include "snappy-stubs-internal.h"
 
-#ifdef HAVE_SYS_MMAN_H
+#if HAVE_SYS_MMAN_H
 #include <sys/mman.h>
 #endif
 
-#ifdef HAVE_SYS_RESOURCE_H
+#if HAVE_SYS_RESOURCE_H
 #include <sys/resource.h>
 #endif
 
-#ifdef HAVE_SYS_TIME_H
+#if HAVE_SYS_TIME_H
 #include <sys/time.h>
 #endif
 
-#ifdef HAVE_WINDOWS_H
+#if HAVE_WINDOWS_H
 // Needed to be able to use std::max without workarounds in the source code.
 // https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
 #define NOMINMAX
@@ -58,15 +58,15 @@
 
 #define InitGoogle(argv0, argc, argv, remove_flags) ((void)(0))
 
-#ifdef HAVE_LIBZ
+#if HAVE_LIBZ
 #include "zlib.h"
 #endif
 
-#ifdef HAVE_LIBLZO2
+#if HAVE_LIBLZO2
 #include "lzo/lzo1x.h"
 #endif
 
-#ifdef HAVE_LIBLZ4
+#if HAVE_LIBLZ4
 #include "lz4.h"
 #endif
 
@@ -216,7 +216,7 @@
 #define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
 #define CHECK_OK(cond) (cond).ok()
 
-#ifdef HAVE_LIBZ
+#if HAVE_LIBZ
 
 // Object-oriented wrapper around zlib.
 class ZLib {
diff -Nru snappy-1.1.9/snappy_test_tool.cc snappy-1.1.10/snappy_test_tool.cc
--- snappy-1.1.9/snappy_test_tool.cc 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy_test_tool.cc 2023-03-08 23:44:00.000000000 +0000
@@ -66,7 +66,7 @@
 
 namespace {
 
-#if defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+#if HAVE_FUNC_MMAP && HAVE_FUNC_SYSCONF
 
 // To test against code that reads beyond its input, this class copies a
 // string to a newly allocated group of pages, the last of which
@@ -112,7 +112,7 @@
   size_t size_;
 };
 
-#else  // defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+#else  // HAVE_FUNC_MMAP && HAVE_FUNC_SYSCONF
 
 // Fallback for systems without mmap.
 using DataEndingAtUnreadablePage = std::string;
diff -Nru snappy-1.1.9/snappy_unittest.cc snappy-1.1.10/snappy_unittest.cc
--- snappy-1.1.9/snappy_unittest.cc 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/snappy_unittest.cc 2023-03-08 23:44:00.000000000 +0000
@@ -50,7 +50,7 @@
 
 namespace {
 
-#if defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+#if HAVE_FUNC_MMAP && HAVE_FUNC_SYSCONF
 
 // To test against code that reads beyond its input, this class copies a
 // string to a newly allocated group of pages, the last of which
@@ -96,7 +96,7 @@
   size_t size_;
 };
 
-#else  // defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+#else  // HAVE_FUNC_MMAP) && HAVE_FUNC_SYSCONF
 
 // Fallback for systems without mmap.
 using DataEndingAtUnreadablePage = std::string;
@@ -137,21 +137,10 @@
   CHECK_EQ(uncompressed, input);
 }
 
-void VerifyIOVec(const std::string& input) {
-  std::string compressed;
-  DataEndingAtUnreadablePage i(input);
-  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
-  CHECK_EQ(written, compressed.size());
-  CHECK_LE(compressed.size(),
-           snappy::MaxCompressedLength(input.size()));
-  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-
-  // Try uncompressing into an iovec containing a random number of entries
-  // ranging from 1 to 10.
-  char* buf = new char[input.size()];
+struct iovec* GetIOVec(const std::string& input, char*& buf, size_t& num) {
   std::minstd_rand0 rng(input.size());
   std::uniform_int_distribution<size_t> uniform_1_to_10(1, 10);
-  size_t num = uniform_1_to_10(rng);
+  num = uniform_1_to_10(rng);
   if (input.size() < num) {
     num = input.size();
   }
@@ -175,8 +164,40 @@
     }
     used_so_far += iov[i].iov_len;
   }
-  CHECK(snappy::RawUncompressToIOVec(
-      compressed.data(), compressed.size(), iov, num));
+  return iov;
+}
+
+int VerifyIOVecSource(const std::string& input) {
+  std::string compressed;
+  std::string copy = input;
+  char* buf = const_cast<char*>(copy.data());
+  size_t num = 0;
+  struct iovec* iov = GetIOVec(input, buf, num);
+  const size_t written = snappy::CompressFromIOVec(iov, num, &compressed);
+  CHECK_EQ(written, compressed.size());
+  CHECK_LE(compressed.size(), snappy::MaxCompressedLength(input.size()));
+  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+  std::string uncompressed;
+  DataEndingAtUnreadablePage c(compressed);
+  CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+  CHECK_EQ(uncompressed, input);
+  delete[] iov;
+  return uncompressed.size();
+}
+
+void VerifyIOVecSink(const std::string& input) {
+  std::string compressed;
+  DataEndingAtUnreadablePage i(input);
+  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+  CHECK_EQ(written, compressed.size());
+  CHECK_LE(compressed.size(), snappy::MaxCompressedLength(input.size()));
+  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+  char* buf = new char[input.size()];
+  size_t num = 0;
+  struct iovec* iov = GetIOVec(input, buf, num);
+  CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(), iov,
+                                     num));
   CHECK(!memcmp(buf, input.data(), input.size()));
   delete[] iov;
   delete[] buf;
@@ -252,15 +273,18 @@
   // Compress using string based routines
   const int result = VerifyString(input);
 
+  // Compress using `iovec`-based routines.
+  CHECK_EQ(VerifyIOVecSource(input), result);
+
   // Verify using sink based routines
   VerifyStringSink(input);
 
   VerifyNonBlockedCompression(input);
-  VerifyIOVec(input);
+  VerifyIOVecSink(input);
   if (!input.empty()) {
     const std::string expanded = Expand(input);
     VerifyNonBlockedCompression(expanded);
-    VerifyIOVec(input);
+    VerifyIOVecSink(input);
   }
 
   return result;
@@ -540,7 +564,27 @@
   CHECK_EQ(uncompressed, src);
 }
 
-TEST(Snappy, IOVecEdgeCases) {
+TEST(Snappy, IOVecSourceEdgeCases) {
+  // Validate that empty leading, trailing, and in-between iovecs are handled:
+  // [] [] ['a'] [] ['b'] [].
+  std::string data = "ab";
+  char* buf = const_cast<char*>(data.data());
+  size_t used_so_far = 0;
+  static const int kLengths[] = {0, 0, 1, 0, 1, 0};
+  struct iovec iov[ARRAYSIZE(kLengths)];
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    iov[i].iov_base = buf + used_so_far;
+    iov[i].iov_len = kLengths[i];
+    used_so_far += kLengths[i];
+  }
+  std::string compressed;
+  snappy::CompressFromIOVec(iov, ARRAYSIZE(kLengths), &compressed);
+  std::string uncompressed;
+  snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed);
+  CHECK_EQ(data, uncompressed);
+}
+
+TEST(Snappy, IOVecSinkEdgeCases) {
   // Test some tricky edge cases in the iovec output that are not necessarily
   // exercised by random tests.
@@ -905,7 +949,7 @@
   // COPY_1_BYTE_OFFSET.
   //
   // The tag byte in the compressed data stores len-4 in 3 bits, and
-  // offset/256 in 5 bits. offset%256 is stored in the next byte.
+  // offset/256 in 3 bits. offset%256 is stored in the next byte.
   //
   // This format is used for length in range [4..11] and offset in
   // range [0..2047]
diff -Nru snappy-1.1.9/.travis.yml snappy-1.1.10/.travis.yml
--- snappy-1.1.9/.travis.yml 2021-05-04 22:53:34.000000000 +0000
+++ snappy-1.1.10/.travis.yml 1970-01-01 00:00:00.000000000 +0000
@@ -1,100 +0,0 @@
-# Build matrix / environment variables are explained on:
-# http://about.travis-ci.org/docs/user/build-configuration/
-# This file can be validated on: http://lint.travis-ci.org/
-
-language: cpp
-dist: bionic
-osx_image: xcode12.2
-
-compiler:
-- gcc
-- clang
-os:
-- linux
-- osx
-
-env:
-- BUILD_TYPE=Debug CPU_LEVEL=AVX
-- BUILD_TYPE=Debug CPU_LEVEL=AVX2
-- BUILD_TYPE=RelWithDebInfo CPU_LEVEL=AVX
-- BUILD_TYPE=RelWithDebInfo CPU_LEVEL=AVX2
-
-jobs:
-  exclude:
-  # Travis OSX servers seem to run on pre-Haswell CPUs. Attempting to use AVX2
-  # results in crashes.
-  - env: BUILD_TYPE=Debug CPU_LEVEL=AVX2
-    os: osx
-  - env: BUILD_TYPE=RelWithDebInfo CPU_LEVEL=AVX2
-    os: osx
-  allow_failures:
-  # Homebrew's GCC is currently broken on XCode 11.
-  - compiler: gcc
-    os: osx
-
-addons:
-  apt:
-    sources:
-    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main'
-      key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
-    - sourceline: 'ppa:ubuntu-toolchain-r/test'
-    packages:
-    - clang-10
-    - cmake
-    - gcc-10
-    - g++-10
-    - ninja-build
-  homebrew:
-    packages:
-    - cmake
-    - gcc@10
-    - llvm@10
-    - ninja
-    update: true
-
-install:
-# The following Homebrew packages aren't linked by default, and need to be
-# prepended to the path explicitly.
-- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
-    export PATH="$(brew --prefix llvm)/bin:$PATH";
-  fi
-# Fuzzing is only supported on Clang. Perform fuzzing on Debug builds.
-# LibFuzzer doesn't ship with CommandLineTools on osx.
-- if [ "$CXX" = "clang++" ] && [ "$BUILD_TYPE" = "Debug" ] && [ "$TRAVIS_OS_NAME" != "osx" ]; then
-    export FUZZING=1;
-  else
-    export FUZZING=0;
-  fi
-# /usr/bin/gcc points to an older compiler on both Linux and macOS.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-10" CC="gcc-10"; fi
-# /usr/bin/clang points to an older compiler on both Linux and macOS.
-#
-# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
-# below don't work on macOS. Fortunately, the path change above makes the
-# default values (clang and clang++) resolve to the correct compiler on macOS.
-- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
-    if [ "$CXX" = "clang++" ]; then export CXX="clang++-10" CC="clang-10"; fi;
-  fi
-- echo ${CC}
-- echo ${CXX}
-- ${CXX} --version
-- cmake --version
-
-before_script:
-- mkdir -p build && cd build
-- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
-  -DSNAPPY_REQUIRE_${CPU_LEVEL}=ON -DSNAPPY_FUZZING_BUILD=${FUZZING}
-  -DCMAKE_INSTALL_PREFIX=$HOME/.local
-- cmake --build .
-- cd ..
-
-script:
-- build/snappy_unittest
-- build/snappy_benchmark
-- if [ -f build/snappy_compress_fuzzer ]; then
-    build/snappy_compress_fuzzer -runs=1000 -close_fd_mask=3;
-  fi
-- if [ -f build/snappy_uncompress_fuzzer ]; then
-    build/snappy_uncompress_fuzzer -runs=1000 -close_fd_mask=3;
-  fi
-- cd build && cmake --build . --target install
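
A few editorial usage notes and sketches follow; they are not part of the patch above.

The most user-visible addition in this release is the `iovec` compression entry point declared in snappy.h. A minimal round-trip sketch (the two-buffer layout and contents are illustrative assumptions, not taken from the patch):

// Compress scattered input with snappy::CompressFromIOVec() (new in 1.1.10),
// then decompress and verify the round trip.
#include <sys/uio.h>

#include <cassert>
#include <string>

#include "snappy.h"

int main() {
  std::string part1 = "hello, ";
  std::string part2 = "snappy world";

  struct iovec iov[2];
  iov[0].iov_base = const_cast<char*>(part1.data());
  iov[0].iov_len = part1.size();
  iov[1].iov_base = const_cast<char*>(part2.data());
  iov[1].iov_len = part2.size();

  std::string compressed;
  size_t n = snappy::CompressFromIOVec(iov, 2, &compressed);
  assert(n == compressed.size());

  std::string uncompressed;
  assert(snappy::Uncompress(compressed.data(), compressed.size(),
                            &uncompressed));
  assert(uncompressed == part1 + part2);
  return 0;
}

Note that `CompressFromIOVec` takes the number of `iovec` entries, while `RawCompressFromIOVec` takes the total byte count, as the header comments above spell out.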
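The new `ExtractOffset` helper in snappy.cc packs the per-tag-type masks {0, 0xFF, 0xFFFF, 0} into a single 64-bit constant and selects one by shifting, instead of indexing a table in memory. A standalone sketch of the same idea (distilled here for illustration, not snappy's exact function):

// The four 16-bit masks live in one 64-bit constant; tag_type * 16 selects one.
#include <cassert>
#include <cstdint>

static uint32_t ExtractLow(uint32_t val, unsigned tag_type) {
  constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
  uint32_t mask = static_cast<uint32_t>(
      (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF);
  return val & mask;
}

int main() {
  // tag_type 0 (literal) and 3 keep nothing; 1 keeps one byte; 2 keeps two.
  assert(ExtractLow(0x12345678u, 0) == 0);
  assert(ExtractLow(0x12345678u, 1) == 0x78);
  assert(ExtractLow(0x12345678u, 2) == 0x5678);
  assert(ExtractLow(0x12345678u, 3) == 0);
  return 0;
}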
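The DecompressBranchless changes above replace immediate `MemCopy`/`MemMove` calls with a deferred copy that is flushed just before the next write to the output. A rough illustration of that pattern (names and structure here are editorial assumptions, not snappy's actual helpers):

// Record a copy instead of performing it, then flush it before the next write.
#include <cassert>
#include <cstddef>
#include <cstring>

struct DeferredCopy {
  const char* src = nullptr;
  size_t len = 0;
};

inline void DeferCopy(DeferredCopy* d, const char* src, size_t len) {
  d->src = src;  // remember the pending copy
  d->len = len;
}

inline void FlushCopy(DeferredCopy* d, char** op) {
  if (d->len != 0) {
    std::memcpy(*op, d->src, d->len);  // perform the remembered copy
    *op += d->len;
  }
  d->len = 0;
}

int main() {
  char out[16] = {0};
  char* op = out;
  DeferredCopy d;
  DeferCopy(&d, "abc", 3);  // decoded a 3-byte literal, copy not done yet
  FlushCopy(&d, &op);       // flushed right before the next output write
  DeferCopy(&d, "de", 2);
  FlushCopy(&d, &op);       // final flush, as at the end of the real loop
  assert(std::memcmp(out, "abcde", 5) == 0);
  return 0;
}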
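Finally, the snappy-stubs-internal.h change swaps byte-by-byte little-endian loads for `std::memcpy` on little-endian builds, which modern compilers lower to a single unaligned load. A self-contained illustration of the pattern (independent of snappy's LittleEndian class):

// memcpy-based unaligned load: defined behavior at any alignment, and the
// compiler typically emits one load instruction instead of four loads + shifts.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t LoadU32LE(const void* ptr) {
  uint32_t value;
  std::memcpy(&value, ptr, sizeof(value));
  return value;  // already little-endian on LE hosts; BE hosts would byte-swap
}

int main() {
  unsigned char buf[] = {0x00, 0x78, 0x56, 0x34, 0x12};
  std::printf("0x%08x\n", LoadU32LE(buf + 1));  // unaligned access is fine
  return 0;
}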