Hashing improvements
parent acc8bc0280
commit 391527d812
source/core/StarJson.cpp
@@ -962,14 +962,37 @@ DataStream& operator>>(DataStream& ds, JsonObject& m) {
   return ds;
 }

-size_t hash<Json>::operator()(Json const& v) const {
-  // This is probably a bit slow and weird, using the utf-8 output printer to
-  // produce a Json hash.
+void Json::getHash(XXHash3& hasher) const {
+  Json::Type type = (Json::Type)m_data.typeIndex();
+  if (type == Json::Type::Bool)
+    hasher.push(m_data.get<bool>() ? "\2\1" : "\2\0", 2);
+  else {
+    hasher.push((const char*)&type, sizeof(type));
+    if (type == Json::Type::Float)
+      hasher.push((const char*)m_data.ptr<double>(), sizeof(double));
+    else if (type == Json::Type::Int)
+      hasher.push((const char*)m_data.ptr<int64_t>(), sizeof(int64_t));
+    else if (type == Json::Type::String) {
+      const String& str = *m_data.get<StringConstPtr>();
+      hasher.push(str.utf8Ptr(), str.utf8Size());
+    }
+    else if (type == Json::Type::Array) {
+      for (Json const& json : *m_data.get<JsonArrayConstPtr>())
+        json.getHash(hasher);
+    }
+    else if (type == Json::Type::Object) {
+      for (auto const& pair : *m_data.get<JsonObjectConstPtr>()) {
+        hasher.push(pair.first.utf8Ptr(), pair.first.utf8Size());
+        pair.second.getHash(hasher);
+      }
+    }
+  }
+}
+
-  size_t h = 0;
-  auto collector = [&h](char c) { h = h * 101 + c; };
-  outputUtf8Json(v, makeFunctionOutputIterator(collector), 0, true);
-  return h;
+size_t hash<Json>::operator()(Json const& v) const {
+  XXHash3 hasher;
+  v.getHash(hasher);
+  return hasher.digest();
 }

 Json const* Json::ptr(size_t index) const {
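The hunk above replaces the old hash (re-serializing the value to UTF-8 and folding each character into a polynomial hash) with a streaming scheme: push a type tag, push the raw payload, and recurse into arrays and objects so the whole tree feeds one hasher state. The following is a minimal, self-contained sketch of that scheme; JsonLike, StreamHasher, and the FNV-1a mixing are stand-ins invented for illustration (the commit itself uses the real Star::Json variant and the new XXHash3 wrapper).

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for a JSON value; the real Star::Json wraps a similar tagged union.
struct JsonLike {
  enum class Type : uint8_t { Null, Bool, Int, Float, String, Array, Object };
  Type type = Type::Null;
  bool boolean = false;
  int64_t integer = 0;
  double number = 0.0;
  std::string string;
  std::vector<JsonLike> array;          // used when type == Array
  std::vector<std::string> objectKeys;  // parallel arrays stand in for a map,
  std::vector<JsonLike> objectValues;   //   keeping the sketch self-contained
};

// Streaming hasher with the same push()/digest() shape as the commit's XXHash3.
// FNV-1a is used here only so the sketch has no external dependency.
struct StreamHasher {
  uint64_t h = 1469598103934665603ull;
  void push(char const* data, size_t length) {
    for (size_t i = 0; i < length; ++i)
      h = (h ^ (unsigned char)data[i]) * 1099511628211ull;
  }
  uint64_t digest() const { return h; }
};

// Mirrors the shape of Json::getHash: push a type tag, then the raw payload,
// and recurse into containers so every nested value feeds one running state.
void getHash(JsonLike const& v, StreamHasher& hasher) {
  if (v.type == JsonLike::Type::Bool) {
    hasher.push(v.boolean ? "\2\1" : "\2\0", 2);
  } else {
    hasher.push((char const*)&v.type, sizeof(v.type));
    if (v.type == JsonLike::Type::Float)
      hasher.push((char const*)&v.number, sizeof(v.number));
    else if (v.type == JsonLike::Type::Int)
      hasher.push((char const*)&v.integer, sizeof(v.integer));
    else if (v.type == JsonLike::Type::String)
      hasher.push(v.string.data(), v.string.size());
    else if (v.type == JsonLike::Type::Array)
      for (JsonLike const& e : v.array)
        getHash(e, hasher);
    else if (v.type == JsonLike::Type::Object)
      for (size_t i = 0; i < v.objectKeys.size(); ++i) {
        hasher.push(v.objectKeys[i].data(), v.objectKeys[i].size());
        getHash(v.objectValues[i], hasher);
      }
  }
}

int main() {
  JsonLike item;
  item.type = JsonLike::Type::Object;
  item.objectKeys = {"name", "count"};
  item.objectValues = {JsonLike{JsonLike::Type::String, false, 0, 0.0, "sword"},
                       JsonLike{JsonLike::Type::Int, false, 3}};
  StreamHasher hasher;
  getHash(item, hasher);
  std::cout << std::hex << hasher.digest() << "\n";  // one 64-bit digest per value
}

The point of the structure is that hashing never allocates or re-serializes: each scalar contributes its bytes directly, and equal values walk the same sequence of pushes and therefore yield the same digest.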
source/core/StarJson.hpp
@@ -4,6 +4,7 @@
 #include "StarDataStream.hpp"
 #include "StarVariant.hpp"
 #include "StarString.hpp"
+#include "StarXXHash.hpp"

 namespace Star {

@@ -259,6 +260,8 @@ public:
   // Does this Json not share its storage with any other Json?
   bool unique() const;

+  void getHash(XXHash3& hasher) const;
+
 private:
   Json const* ptr(size_t index) const;
   Json const* ptr(String const& key) const;
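Because getHash() takes the hasher by reference instead of returning a digest, a caller can stream a Json value into a larger keyed hash alongside other fields. A small illustrative helper, assuming only the APIs visible in this diff (the function itself is hypothetical, not part of the commit):

#include <cstdint>

#include "StarJson.hpp"
#include "StarXXHash.hpp"

namespace Star {

// Hypothetical helper: mix a name and its Json parameters into one digest.
inline uint64_t hashNamedConfig(String const& name, Json const& config) {
  XXHash3 hasher;
  hasher.push(name.utf8Ptr(), name.utf8Size()); // name bytes first
  config.getHash(hasher);                       // then the whole Json tree
  return hasher.digest();
}

}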
source/core/StarXXHash.hpp
@@ -5,7 +5,9 @@
 #include "StarByteArray.hpp"

 #define XXH_STATIC_LINKING_ONLY
+#define XXH_INLINE_ALL
 #include "xxhash.h"
+#include "xxh3.h"

 namespace Star {

@@ -31,6 +33,18 @@ private:
   XXH64_state_s state;
 };

+class XXHash3 {
+public:
+  XXHash3();
+
+  void push(char const* data, size_t length);
+  uint64_t digest();
+
+private:
+  XXH3_state_s state;
+};
+
+
 uint32_t xxHash32(char const* source, size_t length);
 uint32_t xxHash32(ByteArray const& in);
 uint32_t xxHash32(String const& in);
@@ -39,6 +53,10 @@ uint64_t xxHash64(char const* source, size_t length);
 uint64_t xxHash64(ByteArray const& in);
 uint64_t xxHash64(String const& in);

+uint64_t xxHash3(char const* source, size_t length);
+uint64_t xxHash3(ByteArray const& in);
+uint64_t xxHash3(String const& in);
+
 #define XXHASH32_PRIMITIVE(TYPE, CAST_TYPE) \
   inline void xxHash32Push(XXHash32& hash, TYPE const& v) { \
     CAST_TYPE cv = v; \
@@ -53,6 +71,13 @@ uint64_t xxHash64(String const& in);
     hash.push((char const*)(&cv), sizeof(cv)); \
   }

+#define XXHASH3_PRIMITIVE(TYPE, CAST_TYPE) \
+  inline void xxHash3Push(XXHash3& hash, TYPE const& v) { \
+    CAST_TYPE cv = v; \
+    cv = toLittleEndian(cv); \
+    hash.push((char const*)(&cv), sizeof(cv)); \
+  }
+
 XXHASH32_PRIMITIVE(bool, bool);
 XXHASH32_PRIMITIVE(int, int32_t);
 XXHASH32_PRIMITIVE(long, int64_t);
@@ -73,6 +98,16 @@ XXHASH64_PRIMITIVE(unsigned long long, uint64_t);
 XXHASH64_PRIMITIVE(float, float);
 XXHASH64_PRIMITIVE(double, double);

+XXHASH3_PRIMITIVE(bool, bool);
+XXHASH3_PRIMITIVE(int, int32_t);
+XXHASH3_PRIMITIVE(long, int64_t);
+XXHASH3_PRIMITIVE(long long, int64_t);
+XXHASH3_PRIMITIVE(unsigned int, uint32_t);
+XXHASH3_PRIMITIVE(unsigned long, uint64_t);
+XXHASH3_PRIMITIVE(unsigned long long, uint64_t);
+XXHASH3_PRIMITIVE(float, float);
+XXHASH3_PRIMITIVE(double, double);
+
 inline void xxHash32Push(XXHash32& hash, char const* str) {
   hash.push(str, strlen(str));
 }
@@ -89,6 +124,14 @@ inline void xxHash64Push(XXHash64& hash, String const& str) {
   hash.push(str.utf8Ptr(), str.size());
 }

+inline void xxHash3Push(XXHash3& hash, char const* str) {
+  hash.push(str, strlen(str));
+}
+
+inline void xxHash3Push(XXHash3& hash, String const& str) {
+  hash.push(str.utf8Ptr(), str.size());
+}
+
 inline XXHash32::XXHash32(uint32_t seed) {
   XXH32_reset(&state, seed);
 }
@@ -113,6 +156,18 @@ inline uint64_t XXHash64::digest() {
   return XXH64_digest(&state);
 }

+inline XXHash3::XXHash3() {
+  XXH3_64bits_reset(&state);
+}
+
+inline void XXHash3::push(char const* data, size_t length) {
+  XXH3_64bits_update(&state, data, length);
+}
+
+inline uint64_t XXHash3::digest() {
+  return XXH3_64bits_digest(&state);
+}
+
 inline uint32_t xxHash32(char const* source, size_t length) {
   return XXH32(source, length, 0);
 }
@@ -137,6 +192,17 @@ inline uint64_t xxHash64(String const& in) {
   return xxHash64(in.utf8Ptr(), in.utf8Size());
 }

+inline uint64_t xxHash3(char const* source, size_t length) {
+  return XXH3_64bits(source, length);
+}
+
+inline uint64_t xxHash3(ByteArray const& in) {
+  return xxHash3(in.ptr(), in.size());
+}
+
+inline uint64_t xxHash3(String const& in) {
+  return xxHash3(in.utf8Ptr(), in.utf8Size());
+}
 }

 #endif
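Taken together, the header now offers both a streaming path (XXHash3 plus the xxHash3Push overloads) and one-shot helpers (the xxHash3 free functions). A short usage sketch, assuming only the declarations added above; hashExampleRecord and hashExampleBuffer are illustrative names, not part of the commit:

#include <cstdint>

#include "StarXXHash.hpp"

namespace Star {

// Illustrative: combine heterogeneous fields into one streaming XXH3 digest.
inline uint64_t hashExampleRecord(String const& name, int level, float durability) {
  XXHash3 hasher;                  // XXH3_64bits_reset on construction
  xxHash3Push(hasher, name);       // String overload: pushes the UTF-8 bytes
  xxHash3Push(hasher, level);      // XXHASH3_PRIMITIVE(int, int32_t)
  xxHash3Push(hasher, durability); // XXHASH3_PRIMITIVE(float, float)
  return hasher.digest();          // XXH3_64bits_digest
}

// Illustrative: one-shot variant when the whole buffer is already in hand.
inline uint64_t hashExampleBuffer(ByteArray const& bytes) {
  return xxHash3(bytes);           // single XXH3_64bits call over ptr()/size()
}

}

The primitive overloads normalize values to a fixed-width little-endian representation before pushing, so digests do not depend on host integer width or byte order.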
source/extern/xxh3.h (vendored, new 55-line file)
@@ -0,0 +1,55 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Development source file for `xxh3`
+ * Copyright (C) 2019-2020 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other materials provided with the
+ *      distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ *   - xxHash homepage: https://www.xxhash.com
+ *   - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*
+ * Note: This file used to host the source code of XXH3_* variants.
+ * during the development period.
+ * The source code is now properly integrated within xxhash.h.
+ *
+ * xxh3.h is no longer useful,
+ * but it is still provided for compatibility with source code
+ * which used to include it directly.
+ *
+ * Programs are now highly discouraged to include xxh3.h.
+ * Include `xxhash.h` instead, which is the officially supported interface.
+ *
+ * In the future, xxh3.h will start to generate warnings, then errors,
+ * then it will be removed from source package and from include directory.
+ */
+
+/* Simulate the same impact as including the old xxh3.h source file */
+
+#define XXH_INLINE_ALL
+#include "xxhash.h"
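Since this shim only defines XXH_INLINE_ALL and includes xxhash.h, the whole XXH3 family is available header-only. For reference, a minimal standalone use of that API outside the Star wrappers, assuming the v0.8-era names vendored here:

#define XXH_INLINE_ALL  // header-only: XXH3 functions become static/inline in this TU
#include "xxhash.h"

#include <cstdio>

int main() {
  const char msg[] = "hello";

  // One-shot, unseeded 64-bit variant (what Star::xxHash3 wraps).
  XXH64_hash_t oneShot = XXH3_64bits(msg, sizeof(msg) - 1);

  // Streaming, mirroring what Star::XXHash3 wraps.
  XXH3_state_t state;
  XXH3_64bits_reset(&state);
  XXH3_64bits_update(&state, msg, sizeof(msg) - 1);
  XXH64_hash_t streamed = XXH3_64bits_digest(&state);

  std::printf("%llu %llu\n", (unsigned long long)oneShot, (unsigned long long)streamed);
  return oneShot == streamed ? 0 : 1;  // both paths produce the same digest
}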
source/extern/xxh_x86dispatch.c (vendored, new 770-line file)
@@ -0,0 +1,770 @@
|
||||
/*
|
||||
* xxHash - Extremely Fast Hash algorithm
|
||||
* Copyright (C) 2020 Yann Collet
|
||||
*
|
||||
* BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* You can contact the author at:
|
||||
* - xxHash homepage: https://www.xxhash.com
|
||||
* - xxHash source repository: https://github.com/Cyan4973/xxHash
|
||||
*/
|
||||
|
||||
|
||||
/*!
|
||||
* @file xxh_x86dispatch.c
|
||||
*
|
||||
* Automatic dispatcher code for the @ref xxh3_family on x86-based targets.
|
||||
*
|
||||
* Optional add-on.
|
||||
*
|
||||
* **Compile this file with the default flags for your target.** Do not compile
|
||||
* with flags like `-mavx*`, `-march=native`, or `/arch:AVX*`, there will be
|
||||
* an error. See @ref XXH_X86DISPATCH_ALLOW_AVX for details.
|
||||
*
|
||||
* @defgroup dispatch x86 Dispatcher
|
||||
* @{
|
||||
*/
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || defined(_M_X64))
|
||||
# error "Dispatching is currently only supported on x86 and x86_64."
|
||||
#endif
|
||||
|
||||
/*!
|
||||
* @def XXH_X86DISPATCH_ALLOW_AVX
|
||||
* @brief Disables the AVX sanity check.
|
||||
*
|
||||
* Don't compile xxh_x86dispatch.c with options like `-mavx*`, `-march=native`,
|
||||
* or `/arch:AVX*`. It is intended to be compiled for the minimum target, and
|
||||
* it selectively enables SSE2, AVX2, and AVX512 when it is needed.
|
||||
*
|
||||
* Using this option _globally_ allows this feature, and therefore makes it
|
||||
* undefined behavior to execute on any CPU without said feature.
|
||||
*
|
||||
* Even if the source code isn't directly using AVX intrinsics in a function,
|
||||
* the compiler can still generate AVX code from autovectorization and by
|
||||
* "upgrading" SSE2 intrinsics to use the VEX prefixes (a.k.a. AVX128).
|
||||
*
|
||||
* Use the same flags that you use to compile the rest of the program; this
|
||||
* file will safely generate SSE2, AVX2, and AVX512 without these flags.
|
||||
*
|
||||
* Define XXH_X86DISPATCH_ALLOW_AVX to ignore this check, and feel free to open
|
||||
* an issue if there is a target in the future where AVX is a default feature.
|
||||
*/
|
||||
#ifdef XXH_DOXYGEN
|
||||
# define XXH_X86DISPATCH_ALLOW_AVX
|
||||
#endif
|
||||
|
||||
#if defined(__AVX__) && !defined(XXH_X86DISPATCH_ALLOW_AVX)
|
||||
# error "Do not compile xxh_x86dispatch.c with AVX enabled! See the comment above."
|
||||
#endif
|
||||
|
||||
#ifdef __has_include
|
||||
# define XXH_HAS_INCLUDE(header) __has_include(header)
|
||||
#else
|
||||
# define XXH_HAS_INCLUDE(header) 0
|
||||
#endif
|
||||
|
||||
/*!
|
||||
* @def XXH_DISPATCH_SCALAR
|
||||
* @brief Enables/dispatching the scalar code path.
|
||||
*
|
||||
* If this is defined to 0, SSE2 support is assumed. This reduces code size
|
||||
* when the scalar path is not needed.
|
||||
*
|
||||
* This is automatically defined to 0 when...
|
||||
* - SSE2 support is enabled in the compiler
|
||||
* - Targeting x86_64
|
||||
* - Targeting Android x86
|
||||
* - Targeting macOS
|
||||
*/
|
||||
#ifndef XXH_DISPATCH_SCALAR
|
||||
# if defined(__SSE2__) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) /* SSE2 on by default */ \
|
||||
|| defined(__x86_64__) || defined(_M_X64) /* x86_64 */ \
|
||||
|| defined(__ANDROID__) || defined(__APPLE__) /* Android or macOS */
|
||||
# define XXH_DISPATCH_SCALAR 0 /* disable */
|
||||
# else
|
||||
# define XXH_DISPATCH_SCALAR 1
|
||||
# endif
|
||||
#endif
|
||||
/*!
|
||||
* @def XXH_DISPATCH_AVX2
|
||||
* @brief Enables/disables dispatching for AVX2.
|
||||
*
|
||||
* This is automatically detected if it is not defined.
|
||||
* - GCC 4.7 and later are known to support AVX2, but >4.9 is required for
|
||||
* to get the AVX2 intrinsics and typedefs without -mavx -mavx2.
|
||||
* - Visual Studio 2013 Update 2 and later are known to support AVX2.
|
||||
* - The GCC/Clang internal header `<avx2intrin.h>` is detected. While this is
|
||||
* not allowed to be included directly, it still appears in the builtin
|
||||
* include path and is detectable with `__has_include`.
|
||||
*
|
||||
* @see XXH_AVX2
|
||||
*/
|
||||
#ifndef XXH_DISPATCH_AVX2
|
||||
# if (defined(__GNUC__) && (__GNUC__ > 4)) /* GCC 5.0+ */ \
|
||||
|| (defined(_MSC_VER) && _MSC_VER >= 1900) /* VS 2015+ */ \
|
||||
|| (defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 180030501) /* VS 2013 Update 2 */ \
|
||||
|| XXH_HAS_INCLUDE(<avx2intrin.h>) /* GCC/Clang internal header */
|
||||
# define XXH_DISPATCH_AVX2 1 /* enable dispatch towards AVX2 */
|
||||
# else
|
||||
# define XXH_DISPATCH_AVX2 0
|
||||
# endif
|
||||
#endif /* XXH_DISPATCH_AVX2 */
|
||||
|
||||
/*!
|
||||
* @def XXH_DISPATCH_AVX512
|
||||
* @brief Enables/disables dispatching for AVX512.
|
||||
*
|
||||
* Automatically detected if one of the following conditions is met:
|
||||
* - GCC 4.9 and later are known to support AVX512.
|
||||
* - Visual Studio 2017 and later are known to support AVX2.
|
||||
* - The GCC/Clang internal header `<avx512fintrin.h>` is detected. While this
|
||||
* is not allowed to be included directly, it still appears in the builtin
|
||||
* include path and is detectable with `__has_include`.
|
||||
*
|
||||
* @see XXH_AVX512
|
||||
*/
|
||||
#ifndef XXH_DISPATCH_AVX512
|
||||
# if (defined(__GNUC__) \
|
||||
&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))) /* GCC 4.9+ */ \
|
||||
|| (defined(_MSC_VER) && _MSC_VER >= 1910) /* VS 2017+ */ \
|
||||
|| XXH_HAS_INCLUDE(<avx512fintrin.h>) /* GCC/Clang internal header */
|
||||
# define XXH_DISPATCH_AVX512 1 /* enable dispatch towards AVX512 */
|
||||
# else
|
||||
# define XXH_DISPATCH_AVX512 0
|
||||
# endif
|
||||
#endif /* XXH_DISPATCH_AVX512 */
|
||||
|
||||
/*!
|
||||
* @def XXH_TARGET_SSE2
|
||||
* @brief Allows a function to be compiled with SSE2 intrinsics.
|
||||
*
|
||||
* Uses `__attribute__((__target__("sse2")))` on GCC to allow SSE2 to be used
|
||||
* even with `-mno-sse2`.
|
||||
*
|
||||
* @def XXH_TARGET_AVX2
|
||||
* @brief Like @ref XXH_TARGET_SSE2, but for AVX2.
|
||||
*
|
||||
* @def XXH_TARGET_AVX512
|
||||
* @brief Like @ref XXH_TARGET_SSE2, but for AVX512.
|
||||
*/
|
||||
#if defined(__GNUC__)
|
||||
# include <emmintrin.h> /* SSE2 */
|
||||
# if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
|
||||
# include <immintrin.h> /* AVX2, AVX512F */
|
||||
# endif
|
||||
# define XXH_TARGET_SSE2 __attribute__((__target__("sse2")))
|
||||
# define XXH_TARGET_AVX2 __attribute__((__target__("avx2")))
|
||||
# define XXH_TARGET_AVX512 __attribute__((__target__("avx512f")))
|
||||
#elif defined(_MSC_VER)
|
||||
# include <intrin.h>
|
||||
# define XXH_TARGET_SSE2
|
||||
# define XXH_TARGET_AVX2
|
||||
# define XXH_TARGET_AVX512
|
||||
#else
|
||||
# error "Dispatching is currently not supported for your compiler."
|
||||
#endif
|
||||
|
||||
#ifdef XXH_DISPATCH_DEBUG
|
||||
/* debug logging */
|
||||
# include <stdio.h>
|
||||
# define XXH_debugPrint(str) { fprintf(stderr, "DEBUG: xxHash dispatch: %s \n", str); fflush(NULL); }
|
||||
#else
|
||||
# define XXH_debugPrint(str) ((void)0)
|
||||
# undef NDEBUG /* avoid redefinition */
|
||||
# define NDEBUG
|
||||
#endif
|
||||
#include <assert.h>
|
||||
|
||||
#define XXH_INLINE_ALL
|
||||
#define XXH_X86DISPATCH
|
||||
#include "xxhash.h"
|
||||
|
||||
/*
|
||||
* Support both AT&T and Intel dialects
|
||||
*
|
||||
* GCC doesn't convert AT&T syntax to Intel syntax, and will error out if
|
||||
* compiled with -masm=intel. Instead, it supports dialect switching with
|
||||
* curly braces: { AT&T syntax | Intel syntax }
|
||||
*
|
||||
* Clang's integrated assembler automatically converts AT&T syntax to Intel if
|
||||
* needed, making the dialect switching useless (it isn't even supported).
|
||||
*
|
||||
* Note: Comments are written in the inline assembly itself.
|
||||
*/
|
||||
#ifdef __clang__
|
||||
# define XXH_I_ATT(intel, att) att "\n\t"
|
||||
#else
|
||||
# define XXH_I_ATT(intel, att) "{" att "|" intel "}\n\t"
|
||||
#endif
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Runs CPUID.
|
||||
*
|
||||
* @param eax , ecx The parameters to pass to CPUID, %eax and %ecx respectively.
|
||||
* @param abcd The array to store the result in, `{ eax, ebx, ecx, edx }`
|
||||
*/
|
||||
static void XXH_cpuid(xxh_u32 eax, xxh_u32 ecx, xxh_u32* abcd)
|
||||
{
|
||||
#if defined(_MSC_VER)
|
||||
__cpuidex(abcd, eax, ecx);
|
||||
#else
|
||||
xxh_u32 ebx, edx;
|
||||
# if defined(__i386__) && defined(__PIC__)
|
||||
__asm__(
|
||||
"# Call CPUID\n\t"
|
||||
"#\n\t"
|
||||
"# On 32-bit x86 with PIC enabled, we are not allowed to overwrite\n\t"
|
||||
"# EBX, so we use EDI instead.\n\t"
|
||||
XXH_I_ATT("mov edi, ebx", "movl %%ebx, %%edi")
|
||||
XXH_I_ATT("cpuid", "cpuid" )
|
||||
XXH_I_ATT("xchg edi, ebx", "xchgl %%ebx, %%edi")
|
||||
: "=D" (ebx),
|
||||
# else
|
||||
__asm__(
|
||||
"# Call CPUID\n\t"
|
||||
XXH_I_ATT("cpuid", "cpuid")
|
||||
: "=b" (ebx),
|
||||
# endif
|
||||
"+a" (eax), "+c" (ecx), "=d" (edx));
|
||||
abcd[0] = eax;
|
||||
abcd[1] = ebx;
|
||||
abcd[2] = ecx;
|
||||
abcd[3] = edx;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Modified version of Intel's guide
|
||||
* https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family
|
||||
*/
|
||||
|
||||
#if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Runs `XGETBV`.
|
||||
*
|
||||
* While the CPU may support AVX2, the operating system might not properly save
|
||||
* the full YMM/ZMM registers.
|
||||
*
|
||||
* xgetbv is used for detecting this: Any compliant operating system will define
|
||||
* a set of flags in the xcr0 register indicating how it saves the AVX registers.
|
||||
*
|
||||
* You can manually disable this flag on Windows by running, as admin:
|
||||
*
|
||||
* bcdedit.exe /set xsavedisable 1
|
||||
*
|
||||
* and rebooting. Run the same command with 0 to re-enable it.
|
||||
*/
|
||||
static xxh_u64 XXH_xgetbv(void)
|
||||
{
|
||||
#if defined(_MSC_VER)
|
||||
return _xgetbv(0); /* min VS2010 SP1 compiler is required */
|
||||
#else
|
||||
xxh_u32 xcr0_lo, xcr0_hi;
|
||||
__asm__(
|
||||
"# Call XGETBV\n\t"
|
||||
"#\n\t"
|
||||
"# Older assemblers (e.g. macOS's ancient GAS version) don't support\n\t"
|
||||
"# the XGETBV opcode, so we encode it by hand instead.\n\t"
|
||||
"# See <https://github.com/asmjit/asmjit/issues/78> for details.\n\t"
|
||||
".byte 0x0f, 0x01, 0xd0\n\t"
|
||||
: "=a" (xcr0_lo), "=d" (xcr0_hi) : "c" (0));
|
||||
return xcr0_lo | ((xxh_u64)xcr0_hi << 32);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#define XXH_SSE2_CPUID_MASK (1 << 26)
|
||||
#define XXH_OSXSAVE_CPUID_MASK ((1 << 26) | (1 << 27))
|
||||
#define XXH_AVX2_CPUID_MASK (1 << 5)
|
||||
#define XXH_AVX2_XGETBV_MASK ((1 << 2) | (1 << 1))
|
||||
#define XXH_AVX512F_CPUID_MASK (1 << 16)
|
||||
#define XXH_AVX512F_XGETBV_MASK ((7 << 5) | (1 << 2) | (1 << 1))
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Returns the best XXH3 implementation.
|
||||
*
|
||||
* Runs various CPUID/XGETBV tests to try and determine the best implementation.
|
||||
*
|
||||
* @ret The best @ref XXH_VECTOR implementation.
|
||||
* @see XXH_VECTOR_TYPES
|
||||
*/
|
||||
static int XXH_featureTest(void)
|
||||
{
|
||||
xxh_u32 abcd[4];
|
||||
xxh_u32 max_leaves;
|
||||
int best = XXH_SCALAR;
|
||||
#if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
|
||||
xxh_u64 xgetbv_val;
|
||||
#endif
|
||||
#if defined(__GNUC__) && defined(__i386__)
|
||||
xxh_u32 cpuid_supported;
|
||||
__asm__(
|
||||
"# For the sake of ruthless backwards compatibility, check if CPUID\n\t"
|
||||
"# is supported in the EFLAGS on i386.\n\t"
|
||||
"# This is not necessary on x86_64 - CPUID is mandatory.\n\t"
|
||||
"# The ID flag (bit 21) in the EFLAGS register indicates support\n\t"
|
||||
"# for the CPUID instruction. If a software procedure can set and\n\t"
|
||||
"# clear this flag, the processor executing the procedure supports\n\t"
|
||||
"# the CPUID instruction.\n\t"
|
||||
"# <https://c9x.me/x86/html/file_module_x86_id_45.html>\n\t"
|
||||
"#\n\t"
|
||||
"# Routine is from <https://wiki.osdev.org/CPUID>.\n\t"
|
||||
|
||||
"# Save EFLAGS\n\t"
|
||||
XXH_I_ATT("pushfd", "pushfl" )
|
||||
"# Store EFLAGS\n\t"
|
||||
XXH_I_ATT("pushfd", "pushfl" )
|
||||
"# Invert the ID bit in stored EFLAGS\n\t"
|
||||
XXH_I_ATT("xor dword ptr[esp], 0x200000", "xorl $0x200000, (%%esp)")
|
||||
"# Load stored EFLAGS (with ID bit inverted)\n\t"
|
||||
XXH_I_ATT("popfd", "popfl" )
|
||||
"# Store EFLAGS again (ID bit may or not be inverted)\n\t"
|
||||
XXH_I_ATT("pushfd", "pushfl" )
|
||||
"# eax = modified EFLAGS (ID bit may or may not be inverted)\n\t"
|
||||
XXH_I_ATT("pop eax", "popl %%eax" )
|
||||
"# eax = whichever bits were changed\n\t"
|
||||
XXH_I_ATT("xor eax, dword ptr[esp]", "xorl (%%esp), %%eax" )
|
||||
"# Restore original EFLAGS\n\t"
|
||||
XXH_I_ATT("popfd", "popfl" )
|
||||
"# eax = zero if ID bit can't be changed, else non-zero\n\t"
|
||||
XXH_I_ATT("and eax, 0x200000", "andl $0x200000, %%eax" )
|
||||
: "=a" (cpuid_supported) :: "cc");
|
||||
|
||||
if (XXH_unlikely(!cpuid_supported)) {
|
||||
XXH_debugPrint("CPUID support is not detected!");
|
||||
return best;
|
||||
}
|
||||
|
||||
#endif
|
||||
/* Check how many CPUID pages we have */
|
||||
XXH_cpuid(0, 0, abcd);
|
||||
max_leaves = abcd[0];
|
||||
|
||||
/* Shouldn't happen on hardware, but happens on some QEMU configs. */
|
||||
if (XXH_unlikely(max_leaves == 0)) {
|
||||
XXH_debugPrint("Max CPUID leaves == 0!");
|
||||
return best;
|
||||
}
|
||||
|
||||
/* Check for SSE2, OSXSAVE and xgetbv */
|
||||
XXH_cpuid(1, 0, abcd);
|
||||
|
||||
/*
|
||||
* Test for SSE2. The check is redundant on x86_64, but it doesn't hurt.
|
||||
*/
|
||||
if (XXH_unlikely((abcd[3] & XXH_SSE2_CPUID_MASK) != XXH_SSE2_CPUID_MASK))
|
||||
return best;
|
||||
|
||||
XXH_debugPrint("SSE2 support detected.");
|
||||
|
||||
best = XXH_SSE2;
|
||||
#if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
|
||||
/* Make sure we have enough leaves */
|
||||
if (XXH_unlikely(max_leaves < 7))
|
||||
return best;
|
||||
|
||||
/* Test for OSXSAVE and XGETBV */
|
||||
if ((abcd[2] & XXH_OSXSAVE_CPUID_MASK) != XXH_OSXSAVE_CPUID_MASK)
|
||||
return best;
|
||||
|
||||
/* CPUID check for AVX features */
|
||||
XXH_cpuid(7, 0, abcd);
|
||||
|
||||
xgetbv_val = XXH_xgetbv();
|
||||
#if XXH_DISPATCH_AVX2
|
||||
/* Validate that AVX2 is supported by the CPU */
|
||||
if ((abcd[1] & XXH_AVX2_CPUID_MASK) != XXH_AVX2_CPUID_MASK)
|
||||
return best;
|
||||
|
||||
/* Validate that the OS supports YMM registers */
|
||||
if ((xgetbv_val & XXH_AVX2_XGETBV_MASK) != XXH_AVX2_XGETBV_MASK) {
|
||||
XXH_debugPrint("AVX2 supported by the CPU, but not the OS.");
|
||||
return best;
|
||||
}
|
||||
|
||||
/* AVX2 supported */
|
||||
XXH_debugPrint("AVX2 support detected.");
|
||||
best = XXH_AVX2;
|
||||
#endif
|
||||
#if XXH_DISPATCH_AVX512
|
||||
/* Check if AVX512F is supported by the CPU */
|
||||
if ((abcd[1] & XXH_AVX512F_CPUID_MASK) != XXH_AVX512F_CPUID_MASK) {
|
||||
XXH_debugPrint("AVX512F not supported by CPU");
|
||||
return best;
|
||||
}
|
||||
|
||||
/* Validate that the OS supports ZMM registers */
|
||||
if ((xgetbv_val & XXH_AVX512F_XGETBV_MASK) != XXH_AVX512F_XGETBV_MASK) {
|
||||
XXH_debugPrint("AVX512F supported by the CPU, but not the OS.");
|
||||
return best;
|
||||
}
|
||||
|
||||
/* AVX512F supported */
|
||||
XXH_debugPrint("AVX512F support detected.");
|
||||
best = XXH_AVX512;
|
||||
#endif
|
||||
#endif
|
||||
return best;
|
||||
}
|
||||
|
||||
|
||||
/* === Vector implementations === */
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Defines the various dispatch functions.
|
||||
*
|
||||
* TODO: Consolidate?
|
||||
*
|
||||
* @param suffix The suffix for the functions, e.g. sse2 or scalar
|
||||
* @param target XXH_TARGET_* or empty.
|
||||
*/
|
||||
#define XXH_DEFINE_DISPATCH_FUNCS(suffix, target) \
|
||||
\
|
||||
/* === XXH3, default variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH64_hash_t \
|
||||
XXHL64_default_##suffix(const void* XXH_RESTRICT input, size_t len) \
|
||||
{ \
|
||||
return XXH3_hashLong_64b_internal( \
|
||||
input, len, XXH3_kSecret, sizeof(XXH3_kSecret), \
|
||||
XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix \
|
||||
); \
|
||||
} \
|
||||
\
|
||||
/* === XXH3, Seeded variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH64_hash_t \
|
||||
XXHL64_seed_##suffix(const void* XXH_RESTRICT input, size_t len, \
|
||||
XXH64_hash_t seed) \
|
||||
{ \
|
||||
return XXH3_hashLong_64b_withSeed_internal( \
|
||||
input, len, seed, XXH3_accumulate_512_##suffix, \
|
||||
XXH3_scrambleAcc_##suffix, XXH3_initCustomSecret_##suffix \
|
||||
); \
|
||||
} \
|
||||
\
|
||||
/* === XXH3, Secret variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH64_hash_t \
|
||||
XXHL64_secret_##suffix(const void* XXH_RESTRICT input, size_t len, \
|
||||
const void* secret, size_t secretLen) \
|
||||
{ \
|
||||
return XXH3_hashLong_64b_internal( \
|
||||
input, len, secret, secretLen, \
|
||||
XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix \
|
||||
); \
|
||||
} \
|
||||
\
|
||||
/* === XXH3 update variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH_errorcode \
|
||||
XXH3_update_##suffix(XXH3_state_t* state, const void* input, size_t len) \
|
||||
{ \
|
||||
return XXH3_update(state, (const xxh_u8*)input, len, \
|
||||
XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix); \
|
||||
} \
|
||||
\
|
||||
/* === XXH128 default variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH128_hash_t \
|
||||
XXHL128_default_##suffix(const void* XXH_RESTRICT input, size_t len) \
|
||||
{ \
|
||||
return XXH3_hashLong_128b_internal( \
|
||||
input, len, XXH3_kSecret, sizeof(XXH3_kSecret), \
|
||||
XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix \
|
||||
); \
|
||||
} \
|
||||
\
|
||||
/* === XXH128 Secret variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH128_hash_t \
|
||||
XXHL128_secret_##suffix(const void* XXH_RESTRICT input, size_t len, \
|
||||
const void* XXH_RESTRICT secret, size_t secretLen) \
|
||||
{ \
|
||||
return XXH3_hashLong_128b_internal( \
|
||||
input, len, (const xxh_u8*)secret, secretLen, \
|
||||
XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix); \
|
||||
} \
|
||||
\
|
||||
/* === XXH128 Seeded variants === */ \
|
||||
\
|
||||
XXH_NO_INLINE target XXH128_hash_t \
|
||||
XXHL128_seed_##suffix(const void* XXH_RESTRICT input, size_t len, \
|
||||
XXH64_hash_t seed) \
|
||||
{ \
|
||||
return XXH3_hashLong_128b_withSeed_internal(input, len, seed, \
|
||||
XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix, \
|
||||
XXH3_initCustomSecret_##suffix); \
|
||||
}
|
||||
|
||||
/* End XXH_DEFINE_DISPATCH_FUNCS */
|
||||
|
||||
#if XXH_DISPATCH_SCALAR
|
||||
XXH_DEFINE_DISPATCH_FUNCS(scalar, /* nothing */)
|
||||
#endif
|
||||
XXH_DEFINE_DISPATCH_FUNCS(sse2, XXH_TARGET_SSE2)
|
||||
#if XXH_DISPATCH_AVX2
|
||||
XXH_DEFINE_DISPATCH_FUNCS(avx2, XXH_TARGET_AVX2)
|
||||
#endif
|
||||
#if XXH_DISPATCH_AVX512
|
||||
XXH_DEFINE_DISPATCH_FUNCS(avx512, XXH_TARGET_AVX512)
|
||||
#endif
|
||||
#undef XXH_DEFINE_DISPATCH_FUNCS
|
||||
|
||||
/* ==== Dispatchers ==== */
|
||||
|
||||
typedef XXH64_hash_t (*XXH3_dispatchx86_hashLong64_default)(const void* XXH_RESTRICT, size_t);
|
||||
|
||||
typedef XXH64_hash_t (*XXH3_dispatchx86_hashLong64_withSeed)(const void* XXH_RESTRICT, size_t, XXH64_hash_t);
|
||||
|
||||
typedef XXH64_hash_t (*XXH3_dispatchx86_hashLong64_withSecret)(const void* XXH_RESTRICT, size_t, const void* XXH_RESTRICT, size_t);
|
||||
|
||||
typedef XXH_errorcode (*XXH3_dispatchx86_update)(XXH3_state_t*, const void*, size_t);
|
||||
|
||||
typedef struct {
|
||||
XXH3_dispatchx86_hashLong64_default hashLong64_default;
|
||||
XXH3_dispatchx86_hashLong64_withSeed hashLong64_seed;
|
||||
XXH3_dispatchx86_hashLong64_withSecret hashLong64_secret;
|
||||
XXH3_dispatchx86_update update;
|
||||
} XXH_dispatchFunctions_s;
|
||||
|
||||
#define XXH_NB_DISPATCHES 4
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Table of dispatchers for @ref XXH3_64bits().
|
||||
*
|
||||
* @pre The indices must match @ref XXH_VECTOR_TYPE.
|
||||
*/
|
||||
static const XXH_dispatchFunctions_s XXH_kDispatch[XXH_NB_DISPATCHES] = {
|
||||
#if XXH_DISPATCH_SCALAR
|
||||
/* Scalar */ { XXHL64_default_scalar, XXHL64_seed_scalar, XXHL64_secret_scalar, XXH3_update_scalar },
|
||||
#else
|
||||
/* Scalar */ { NULL, NULL, NULL, NULL },
|
||||
#endif
|
||||
/* SSE2 */ { XXHL64_default_sse2, XXHL64_seed_sse2, XXHL64_secret_sse2, XXH3_update_sse2 },
|
||||
#if XXH_DISPATCH_AVX2
|
||||
/* AVX2 */ { XXHL64_default_avx2, XXHL64_seed_avx2, XXHL64_secret_avx2, XXH3_update_avx2 },
|
||||
#else
|
||||
/* AVX2 */ { NULL, NULL, NULL, NULL },
|
||||
#endif
|
||||
#if XXH_DISPATCH_AVX512
|
||||
/* AVX512 */ { XXHL64_default_avx512, XXHL64_seed_avx512, XXHL64_secret_avx512, XXH3_update_avx512 }
|
||||
#else
|
||||
/* AVX512 */ { NULL, NULL, NULL, NULL }
|
||||
#endif
|
||||
};
|
||||
/*!
|
||||
* @internal
|
||||
* @brief The selected dispatch table for @ref XXH3_64bits().
|
||||
*/
|
||||
static XXH_dispatchFunctions_s XXH_g_dispatch = { NULL, NULL, NULL, NULL };
|
||||
|
||||
|
||||
typedef XXH128_hash_t (*XXH3_dispatchx86_hashLong128_default)(const void* XXH_RESTRICT, size_t);
|
||||
|
||||
typedef XXH128_hash_t (*XXH3_dispatchx86_hashLong128_withSeed)(const void* XXH_RESTRICT, size_t, XXH64_hash_t);
|
||||
|
||||
typedef XXH128_hash_t (*XXH3_dispatchx86_hashLong128_withSecret)(const void* XXH_RESTRICT, size_t, const void* XXH_RESTRICT, size_t);
|
||||
|
||||
typedef struct {
|
||||
XXH3_dispatchx86_hashLong128_default hashLong128_default;
|
||||
XXH3_dispatchx86_hashLong128_withSeed hashLong128_seed;
|
||||
XXH3_dispatchx86_hashLong128_withSecret hashLong128_secret;
|
||||
XXH3_dispatchx86_update update;
|
||||
} XXH_dispatch128Functions_s;
|
||||
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Table of dispatchers for @ref XXH3_128bits().
|
||||
*
|
||||
* @pre The indices must match @ref XXH_VECTOR_TYPE.
|
||||
*/
|
||||
static const XXH_dispatch128Functions_s XXH_kDispatch128[XXH_NB_DISPATCHES] = {
|
||||
#if XXH_DISPATCH_SCALAR
|
||||
/* Scalar */ { XXHL128_default_scalar, XXHL128_seed_scalar, XXHL128_secret_scalar, XXH3_update_scalar },
|
||||
#else
|
||||
/* Scalar */ { NULL, NULL, NULL, NULL },
|
||||
#endif
|
||||
/* SSE2 */ { XXHL128_default_sse2, XXHL128_seed_sse2, XXHL128_secret_sse2, XXH3_update_sse2 },
|
||||
#if XXH_DISPATCH_AVX2
|
||||
/* AVX2 */ { XXHL128_default_avx2, XXHL128_seed_avx2, XXHL128_secret_avx2, XXH3_update_avx2 },
|
||||
#else
|
||||
/* AVX2 */ { NULL, NULL, NULL, NULL },
|
||||
#endif
|
||||
#if XXH_DISPATCH_AVX512
|
||||
/* AVX512 */ { XXHL128_default_avx512, XXHL128_seed_avx512, XXHL128_secret_avx512, XXH3_update_avx512 }
|
||||
#else
|
||||
/* AVX512 */ { NULL, NULL, NULL, NULL }
|
||||
#endif
|
||||
};
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief The selected dispatch table for @ref XXH3_64bits().
|
||||
*/
|
||||
static XXH_dispatch128Functions_s XXH_g_dispatch128 = { NULL, NULL, NULL, NULL };
|
||||
|
||||
/*!
|
||||
* @internal
|
||||
* @brief Runs a CPUID check and sets the correct dispatch tables.
|
||||
*/
|
||||
static void XXH_setDispatch(void)
|
||||
{
|
||||
int vecID = XXH_featureTest();
|
||||
XXH_STATIC_ASSERT(XXH_AVX512 == XXH_NB_DISPATCHES-1);
|
||||
assert(XXH_SCALAR <= vecID && vecID <= XXH_AVX512);
|
||||
#if !XXH_DISPATCH_SCALAR
|
||||
assert(vecID != XXH_SCALAR);
|
||||
#endif
|
||||
#if !XXH_DISPATCH_AVX512
|
||||
assert(vecID != XXH_AVX512);
|
||||
#endif
|
||||
#if !XXH_DISPATCH_AVX2
|
||||
assert(vecID != XXH_AVX2);
|
||||
#endif
|
||||
XXH_g_dispatch = XXH_kDispatch[vecID];
|
||||
XXH_g_dispatch128 = XXH_kDispatch128[vecID];
|
||||
}
|
||||
|
||||
|
||||
/* ==== XXH3 public functions ==== */
|
||||
|
||||
static XXH64_hash_t
|
||||
XXH3_hashLong_64b_defaultSecret_selection(const void* input, size_t len,
|
||||
XXH64_hash_t seed64, const xxh_u8* secret, size_t secretLen)
|
||||
{
|
||||
(void)seed64; (void)secret; (void)secretLen;
|
||||
if (XXH_g_dispatch.hashLong64_default == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch.hashLong64_default(input, len);
|
||||
}
|
||||
|
||||
XXH64_hash_t XXH3_64bits_dispatch(const void* input, size_t len)
|
||||
{
|
||||
return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_defaultSecret_selection);
|
||||
}
|
||||
|
||||
static XXH64_hash_t
|
||||
XXH3_hashLong_64b_withSeed_selection(const void* input, size_t len,
|
||||
XXH64_hash_t seed64, const xxh_u8* secret, size_t secretLen)
|
||||
{
|
||||
(void)secret; (void)secretLen;
|
||||
if (XXH_g_dispatch.hashLong64_seed == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch.hashLong64_seed(input, len, seed64);
|
||||
}
|
||||
|
||||
XXH64_hash_t XXH3_64bits_withSeed_dispatch(const void* input, size_t len, XXH64_hash_t seed)
|
||||
{
|
||||
return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed_selection);
|
||||
}
|
||||
|
||||
static XXH64_hash_t
|
||||
XXH3_hashLong_64b_withSecret_selection(const void* input, size_t len,
|
||||
XXH64_hash_t seed64, const xxh_u8* secret, size_t secretLen)
|
||||
{
|
||||
(void)seed64;
|
||||
if (XXH_g_dispatch.hashLong64_secret == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch.hashLong64_secret(input, len, secret, secretLen);
|
||||
}
|
||||
|
||||
XXH64_hash_t XXH3_64bits_withSecret_dispatch(const void* input, size_t len, const void* secret, size_t secretLen)
|
||||
{
|
||||
return XXH3_64bits_internal(input, len, 0, secret, secretLen, XXH3_hashLong_64b_withSecret_selection);
|
||||
}
|
||||
|
||||
XXH_errorcode
|
||||
XXH3_64bits_update_dispatch(XXH3_state_t* state, const void* input, size_t len)
|
||||
{
|
||||
if (XXH_g_dispatch.update == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch.update(state, (const xxh_u8*)input, len);
|
||||
}
|
||||
|
||||
|
||||
/* ==== XXH128 public functions ==== */
|
||||
|
||||
static XXH128_hash_t
|
||||
XXH3_hashLong_128b_defaultSecret_selection(const void* input, size_t len,
|
||||
XXH64_hash_t seed64, const void* secret, size_t secretLen)
|
||||
{
|
||||
(void)seed64; (void)secret; (void)secretLen;
|
||||
if (XXH_g_dispatch128.hashLong128_default == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch128.hashLong128_default(input, len);
|
||||
}
|
||||
|
||||
XXH128_hash_t XXH3_128bits_dispatch(const void* input, size_t len)
|
||||
{
|
||||
return XXH3_128bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_128b_defaultSecret_selection);
|
||||
}
|
||||
|
||||
static XXH128_hash_t
|
||||
XXH3_hashLong_128b_withSeed_selection(const void* input, size_t len,
|
||||
XXH64_hash_t seed64, const void* secret, size_t secretLen)
|
||||
{
|
||||
(void)secret; (void)secretLen;
|
||||
if (XXH_g_dispatch128.hashLong128_seed == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch128.hashLong128_seed(input, len, seed64);
|
||||
}
|
||||
|
||||
XXH128_hash_t XXH3_128bits_withSeed_dispatch(const void* input, size_t len, XXH64_hash_t seed)
|
||||
{
|
||||
return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_128b_withSeed_selection);
|
||||
}
|
||||
|
||||
static XXH128_hash_t
|
||||
XXH3_hashLong_128b_withSecret_selection(const void* input, size_t len,
|
||||
XXH64_hash_t seed64, const void* secret, size_t secretLen)
|
||||
{
|
||||
(void)seed64;
|
||||
if (XXH_g_dispatch128.hashLong128_secret == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch128.hashLong128_secret(input, len, secret, secretLen);
|
||||
}
|
||||
|
||||
XXH128_hash_t XXH3_128bits_withSecret_dispatch(const void* input, size_t len, const void* secret, size_t secretLen)
|
||||
{
|
||||
return XXH3_128bits_internal(input, len, 0, secret, secretLen, XXH3_hashLong_128b_withSecret_selection);
|
||||
}
|
||||
|
||||
XXH_errorcode
|
||||
XXH3_128bits_update_dispatch(XXH3_state_t* state, const void* input, size_t len)
|
||||
{
|
||||
if (XXH_g_dispatch128.update == NULL) XXH_setDispatch();
|
||||
return XXH_g_dispatch128.update(state, (const xxh_u8*)input, len);
|
||||
}
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
/*! @} */
|
source/extern/xxh_x86dispatch.h (vendored, new 86-line file)
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* xxHash - XXH3 Dispatcher for x86-based targets
|
||||
* Copyright (C) 2020 Yann Collet
|
||||
*
|
||||
* BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* You can contact the author at:
|
||||
* - xxHash homepage: https://www.xxhash.com
|
||||
* - xxHash source repository: https://github.com/Cyan4973/xxHash
|
||||
*/
|
||||
|
||||
#ifndef XXH_X86DISPATCH_H_13563687684
|
||||
#define XXH_X86DISPATCH_H_13563687684
|
||||
|
||||
#include "xxhash.h" /* XXH64_hash_t, XXH3_state_t */
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_dispatch(const void* input, size_t len);
|
||||
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed_dispatch(const void* input, size_t len, XXH64_hash_t seed);
|
||||
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret_dispatch(const void* input, size_t len, const void* secret, size_t secretLen);
|
||||
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update_dispatch(XXH3_state_t* state, const void* input, size_t len);
|
||||
|
||||
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_dispatch(const void* input, size_t len);
|
||||
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed_dispatch(const void* input, size_t len, XXH64_hash_t seed);
|
||||
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret_dispatch(const void* input, size_t len, const void* secret, size_t secretLen);
|
||||
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update_dispatch(XXH3_state_t* state, const void* input, size_t len);
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* automatic replacement of XXH3 functions.
|
||||
* can be disabled by setting XXH_DISPATCH_DISABLE_REPLACE */
|
||||
#ifndef XXH_DISPATCH_DISABLE_REPLACE
|
||||
|
||||
# undef XXH3_64bits
|
||||
# define XXH3_64bits XXH3_64bits_dispatch
|
||||
# undef XXH3_64bits_withSeed
|
||||
# define XXH3_64bits_withSeed XXH3_64bits_withSeed_dispatch
|
||||
# undef XXH3_64bits_withSecret
|
||||
# define XXH3_64bits_withSecret XXH3_64bits_withSecret_dispatch
|
||||
# undef XXH3_64bits_update
|
||||
# define XXH3_64bits_update XXH3_64bits_update_dispatch
|
||||
|
||||
# undef XXH128
|
||||
# define XXH128 XXH3_128bits_withSeed_dispatch
|
||||
# undef XXH3_128bits
|
||||
# define XXH3_128bits XXH3_128bits_dispatch
|
||||
# undef XXH3_128bits_withSeed
|
||||
# define XXH3_128bits_withSeed XXH3_128bits_withSeed_dispatch
|
||||
# undef XXH3_128bits_withSecret
|
||||
# define XXH3_128bits_withSecret XXH3_128bits_withSecret_dispatch
|
||||
# undef XXH3_128bits_update
|
||||
# define XXH3_128bits_update XXH3_128bits_update_dispatch
|
||||
|
||||
#endif /* XXH_DISPATCH_DISABLE_REPLACE */
|
||||
|
||||
#endif /* XXH_X86DISPATCH_H_13563687684 */
|
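The two files above add an optional runtime dispatcher: xxh_x86dispatch.c probes CPUID/XGETBV once and fills a table of function pointers, and xxh_x86dispatch.h re-points XXH3_64bits and friends at the *_dispatch entry points. A hedged sketch of how an application might opt in; the compiler invocation in the comment is illustrative and not taken from this repository's build files:

// Build sketch (illustrative): compile the dispatcher without AVX flags,
//   cc -O2 -c source/extern/xxh_x86dispatch.c
// then link that object with translation units that include the header below.

#include "xxh_x86dispatch.h"  // macro-replaces XXH3_64bits & co. with *_dispatch

#include <cstddef>

XXH64_hash_t hashBuffer(void const* data, size_t len) {
  // The first long-input call runs the CPUID/XGETBV feature test (XXH_setDispatch)
  // and caches the best table (scalar/SSE2/AVX2/AVX512); later calls reuse it.
  // Short inputs stay on the inlined short-hash path and never hit the table.
  return XXH3_64bits(data, len);
}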
source/extern/xxhash.c (vendored, 917 changed lines)
@@ -1,888 +1,43 @@
|
||||
/*
|
||||
* xxHash - Fast Hash algorithm
|
||||
* Copyright (C) 2012-2016, Yann Collet
|
||||
*
|
||||
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* You can contact the author at :
|
||||
* - xxHash homepage: http://www.xxhash.com
|
||||
* - xxHash source repository : https://github.com/Cyan4973/xxHash
|
||||
*/
|
||||
|
||||
|
||||
/* *************************************
|
||||
* Tuning parameters
|
||||
***************************************/
|
||||
/*!XXH_FORCE_MEMORY_ACCESS :
|
||||
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
|
||||
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
|
||||
* The below switch allow to select different access method for improved performance.
|
||||
* Method 0 (default) : use `memcpy()`. Safe and portable.
|
||||
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
|
||||
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
|
||||
* Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
|
||||
* It can generate buggy code on targets which do not support unaligned memory accesses.
|
||||
* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
|
||||
* See http://stackoverflow.com/a/32095106/646947 for details.
|
||||
* Prefer these methods in priority order (0 > 1 > 2)
|
||||
* xxHash - Extremely Fast Hash algorithm
|
||||
* Copyright (C) 2012-2020 Yann Collet
|
||||
*
|
||||
* BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* You can contact the author at:
|
||||
* - xxHash homepage: https://www.xxhash.com
|
||||
* - xxHash source repository: https://github.com/Cyan4973/xxHash
|
||||
*/
|
||||
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
|
||||
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
|
||||
# define XXH_FORCE_MEMORY_ACCESS 2
|
||||
# elif defined(__INTEL_COMPILER) || \
|
||||
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
|
||||
# define XXH_FORCE_MEMORY_ACCESS 1
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
|
||||
* If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
|
||||
* When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
|
||||
* By default, this option is disabled. To enable it, uncomment below define :
|
||||
|
||||
/*
|
||||
* xxhash.c instantiates functions defined in xxhash.h
|
||||
*/
|
||||
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
|
||||
|
||||
/*!XXH_FORCE_NATIVE_FORMAT :
|
||||
* By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
|
||||
* Results are therefore identical for little-endian and big-endian CPU.
|
||||
* This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
|
||||
* Should endian-independence be of no importance for your application, you may set the #define below to 1,
|
||||
* to improve speed for Big-endian CPU.
|
||||
* This option has no impact on Little_Endian CPU.
|
||||
*/
|
||||
#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
|
||||
# define XXH_FORCE_NATIVE_FORMAT 0
|
||||
#endif
|
||||
#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */
|
||||
#define XXH_IMPLEMENTATION /* access definitions */
|
||||
|
||||
/*!XXH_FORCE_ALIGN_CHECK :
|
||||
* This is a minor performance trick, only useful with lots of very small keys.
|
||||
* It means : check for aligned/unaligned input.
|
||||
* The check costs one initial branch per hash;
|
||||
* set it to 0 when the input is guaranteed to be aligned,
|
||||
* or when alignment doesn't matter for performance.
|
||||
*/
|
||||
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
|
||||
# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
|
||||
# define XXH_FORCE_ALIGN_CHECK 0
|
||||
# else
|
||||
# define XXH_FORCE_ALIGN_CHECK 1
|
||||
# endif
|
||||
#endif
|
||||
|
||||
|
||||
/* *************************************
|
||||
* Includes & Memory related functions
|
||||
***************************************/
|
||||
/*! Modify the local functions below should you wish to use some other memory routines
|
||||
* for malloc(), free() */
|
||||
#include <stdlib.h>
|
||||
static void* XXH_malloc(size_t s) { return malloc(s); }
|
||||
static void XXH_free (void* p) { free(p); }
|
||||
/*! and for memcpy() */
|
||||
#include <string.h>
|
||||
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
|
||||
|
||||
#define XXH_STATIC_LINKING_ONLY
|
||||
#include "xxhash.h"
|
||||
|
||||
|
||||
/* *************************************
|
||||
* Compiler Specific Options
|
||||
***************************************/
|
||||
#ifdef _MSC_VER /* Visual Studio */
|
||||
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
|
||||
# define FORCE_INLINE static __forceinline
|
||||
#else
|
||||
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
|
||||
# ifdef __GNUC__
|
||||
# define FORCE_INLINE static inline __attribute__((always_inline))
|
||||
# else
|
||||
# define FORCE_INLINE static inline
|
||||
# endif
|
||||
# else
|
||||
# define FORCE_INLINE static
|
||||
# endif /* __STDC_VERSION__ */
|
||||
#endif
|
||||
|
||||
|
||||
/* *************************************
|
||||
* Basic Types
|
||||
***************************************/
|
||||
#ifndef MEM_MODULE
|
||||
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
|
||||
# include <stdint.h>
|
||||
typedef uint8_t BYTE;
|
||||
typedef uint16_t U16;
|
||||
typedef uint32_t U32;
|
||||
# else
|
||||
typedef unsigned char BYTE;
|
||||
typedef unsigned short U16;
|
||||
typedef unsigned int U32;
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
|
||||
|
||||
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
|
||||
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
|
||||
|
||||
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
|
||||
|
||||
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
|
||||
/* currently only defined for gcc and icc */
|
||||
typedef union { U32 u32; } __attribute__((packed)) unalign;
|
||||
static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
|
||||
|
||||
#else
|
||||
|
||||
/* portable and safe solution. Generally efficient.
|
||||
* see : http://stackoverflow.com/a/32095106/646947
|
||||
*/
|
||||
static U32 XXH_read32(const void* memPtr)
|
||||
{
|
||||
U32 val;
|
||||
memcpy(&val, memPtr, sizeof(val));
|
||||
return val;
|
||||
}
|
||||
|
||||
#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
|
||||
|
||||
|
||||
/* ****************************************
|
||||
* Compiler-specific Functions and Macros
|
||||
******************************************/
|
||||
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
|
||||
|
||||
/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
|
||||
#if defined(_MSC_VER)
|
||||
# define XXH_rotl32(x,r) _rotl(x,r)
|
||||
# define XXH_rotl64(x,r) _rotl64(x,r)
|
||||
#else
|
||||
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
|
||||
# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) /* Visual Studio */
|
||||
# define XXH_swap32 _byteswap_ulong
|
||||
#elif XXH_GCC_VERSION >= 403
|
||||
# define XXH_swap32 __builtin_bswap32
|
||||
#else
|
||||
static U32 XXH_swap32 (U32 x)
|
||||
{
|
||||
return ((x << 24) & 0xff000000 ) |
|
||||
((x << 8) & 0x00ff0000 ) |
|
||||
((x >> 8) & 0x0000ff00 ) |
|
||||
((x >> 24) & 0x000000ff );
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* *************************************
|
||||
* Architecture Macros
|
||||
***************************************/
|
||||
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
|
||||
|
||||
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
|
||||
#ifndef XXH_CPU_LITTLE_ENDIAN
|
||||
static const int g_one = 1;
|
||||
# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
|
||||
#endif


/* ***************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}


/* *************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
* 32-bits hash functions
*********************************************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}
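
/* Note (added for clarity): each round folds one 32-bit lane into its accumulator as
 *     acc = rotl32(acc + input * PRIME32_2, 13) * PRIME32_1;
 * which is exactly the sequence XXH32_round() performs above. */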

FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
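
/* Caller-side usage sketch (illustrative only; `msg` is a hypothetical buffer and seed 0
 * is arbitrary): */
#if 0
    const char msg[] = "example";
    unsigned int const h = XXH32(msg, sizeof(msg)-1, 0);
    (void)h;
#endif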


/*====== Hash streaming ======*/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}


FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
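
/* Caller-side streaming sketch (illustrative only; `part1`/`part2` and their lengths are
 * hypothetical). Feeding the input in pieces yields the same digest as a single XXH32()
 * call over the concatenated data. */
#if 0
    XXH32_state_t* const st = XXH32_createState();
    XXH32_reset(st, 0);                      /* seed 0 is arbitrary */
    XXH32_update(st, part1, part1_len);
    XXH32_update(st, part2, part2_len);
    {   unsigned int const h = XXH32_digest(st);
        (void)h;
    }
    XXH32_freeState(st);
#endif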


/*====== Canonical representation ======*/

/*! Default XXH result types are basic unsigned 32 and 64 bits.
*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
*   These functions allow transformation of hash result into and from its canonical format.
*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
*/

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
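
/* Canonical-form sketch (illustrative only; `msg`/`msg_len` are hypothetical): store the
 * big-endian form, then read it back identically on any platform. */
#if 0
    XXH32_canonical_t canon;
    XXH32_canonicalFromHash(&canon, XXH32(msg, msg_len, 0));
    /* ... write the 4 bytes of `canon` to a file or buffer ... */
    {   XXH32_hash_t const restored = XXH32_hashFromCanonical(&canon);
        (void)restored;
    }
#endif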


#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
* 64-bits hash functions
*********************************************************************/

/*====== Memory access ======*/

#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint64_t U64;
# else
    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
# endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif

FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/*====== xxh64 ======*/

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;

static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}
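
/* Note (added for clarity): XXH64_round() is the 64-bit analogue of XXH32_round(),
 *     acc = rotl64(acc + input * PRIME64_2, 31) * PRIME64_1;
 * and XXH64_mergeRound() folds a finished lane back into the main accumulator,
 *     acc = (acc ^ XXH64_round(0, val)) * PRIME64_1 + PRIME64_4; */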

FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
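
/* Caller-side usage sketch (illustrative only; `msg`/`msg_len` are hypothetical and
 * seed 0 is arbitrary): */
#if 0
    unsigned long long const h = XXH64(msg, msg_len, 0);
    (void)h;
#endif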

/*====== Hash Streaming ======*/

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}

FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {  /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}

FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64  = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}

XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}
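
/* Caller-side streaming sketch (illustrative only; `chunk`, `chunk_len` and `read_next()`
 * are hypothetical). The flow mirrors the 32-bit API, with 64-bit state and a 32-byte
 * internal buffer. */
#if 0
    XXH64_state_t* const st = XXH64_createState();
    XXH64_reset(st, 0);                      /* seed 0 is arbitrary */
    while (read_next(&chunk, &chunk_len))
        XXH64_update(st, chunk, chunk_len);
    {   unsigned long long const h = XXH64_digest(st);
        (void)h;
    }
    XXH64_freeState(st);
#endif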


/*====== Canonical representation ======*/

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}

#endif  /* XXH_NO_LONG_LONG */
source/extern/xxhash.h: 5609 lines (vendored); file diff suppressed because it is too large.