Refactor and reformat.

2023-09-23 18:55:52 +01:00
parent 33f021fd54
commit 1905cb0d43
41 changed files with 1557 additions and 1462 deletions


@@ -110,7 +110,6 @@ AARU_EXPORT void AARU_CALL adler32_slicing(uint16_t *sum1, uint16_t *sum2, const
{
uint32_t s1 = *sum1;
uint32_t s2 = *sum2;
unsigned n;
/* in case user likes doing a byte at a time, keep it fast */
@@ -150,37 +149,37 @@ AARU_EXPORT void AARU_CALL adler32_slicing(uint16_t *sum1, uint16_t *sum2, const
n = NMAX / 16; /* NMAX is divisible by 16 */
do
{
s1 += (data)[0];
s1 += data[0];
s2 += s1;
s1 += (data)[0 + 1];
s1 += data[0 + 1];
s2 += s1;
s1 += (data)[0 + 2];
s1 += data[0 + 2];
s2 += s1;
s1 += (data)[0 + 2 + 1];
s1 += data[0 + 2 + 1];
s2 += s1;
s1 += (data)[0 + 4];
s1 += data[0 + 4];
s2 += s1;
s1 += (data)[0 + 4 + 1];
s1 += data[0 + 4 + 1];
s2 += s1;
s1 += (data)[0 + 4 + 2];
s1 += data[0 + 4 + 2];
s2 += s1;
s1 += (data)[0 + 4 + 2 + 1];
s1 += data[0 + 4 + 2 + 1];
s2 += s1;
s1 += (data)[8];
s1 += data[8];
s2 += s1;
s1 += (data)[8 + 1];
s1 += data[8 + 1];
s2 += s1;
s1 += (data)[8 + 2];
s1 += data[8 + 2];
s2 += s1;
s1 += (data)[8 + 2 + 1];
s1 += data[8 + 2 + 1];
s2 += s1;
s1 += (data)[8 + 4];
s1 += data[8 + 4];
s2 += s1;
s1 += (data)[8 + 4 + 1];
s1 += data[8 + 4 + 1];
s2 += s1;
s1 += (data)[8 + 4 + 2];
s1 += data[8 + 4 + 2];
s2 += s1;
s1 += (data)[8 + 4 + 2 + 1];
s1 += data[8 + 4 + 2 + 1];
s2 += s1;
/* 16 sums unrolled */
@@ -197,37 +196,37 @@ AARU_EXPORT void AARU_CALL adler32_slicing(uint16_t *sum1, uint16_t *sum2, const
while(len >= 16)
{
len -= 16;
s1 += (data)[0];
s1 += data[0];
s2 += s1;
s1 += (data)[0 + 1];
s1 += data[0 + 1];
s2 += s1;
s1 += (data)[0 + 2];
s1 += data[0 + 2];
s2 += s1;
s1 += (data)[0 + 2 + 1];
s1 += data[0 + 2 + 1];
s2 += s1;
s1 += (data)[0 + 4];
s1 += data[0 + 4];
s2 += s1;
s1 += (data)[0 + 4 + 1];
s1 += data[0 + 4 + 1];
s2 += s1;
s1 += (data)[0 + 4 + 2];
s1 += data[0 + 4 + 2];
s2 += s1;
s1 += (data)[0 + 4 + 2 + 1];
s1 += data[0 + 4 + 2 + 1];
s2 += s1;
s1 += (data)[8];
s1 += data[8];
s2 += s1;
s1 += (data)[8 + 1];
s1 += data[8 + 1];
s2 += s1;
s1 += (data)[8 + 2];
s1 += data[8 + 2];
s2 += s1;
s1 += (data)[8 + 2 + 1];
s1 += data[8 + 2 + 1];
s2 += s1;
s1 += (data)[8 + 4];
s1 += data[8 + 4];
s2 += s1;
s1 += (data)[8 + 4 + 1];
s1 += data[8 + 4 + 1];
s2 += s1;
s1 += (data)[8 + 4 + 2];
s1 += data[8 + 4 + 2];
s2 += s1;
s1 += (data)[8 + 4 + 2 + 1];
s1 += data[8 + 4 + 2 + 1];
s2 += s1;
data += 16;
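For context on the unrolled loops above: Adler-32 keeps two running sums, s1 (the byte sum) and s2 (the sum of successive s1 values), and the modulo only needs to be applied every NMAX bytes, NMAX being the largest count for which the 32-bit accumulators cannot overflow. A minimal scalar sketch of the same recurrence, assuming the usual zlib constants (65521 and 5552); this is a reference, not the library's API:

#include <stddef.h>
#include <stdint.h>

#define ADLER_MODULE 65521u /* largest prime below 2^16 */
#define NMAX         5552u  /* largest n for which the deferred modulo is safe */

/* Same recurrence as the unrolled loops above, one byte at a time. */
static uint32_t adler32_scalar(uint32_t adler, const uint8_t *data, size_t len)
{
    uint32_t s1 = adler & 0xFFFF;
    uint32_t s2 = (adler >> 16) & 0xFFFF;

    while(len > 0)
    {
        size_t n = len < NMAX ? len : NMAX;
        len -= n;

        while(n--)
        {
            s1 += *data++; /* running byte sum */
            s2 += s1;      /* running sum of the s1 values */
        }

        s1 %= ADLER_MODULE;
        s2 %= ADLER_MODULE;
    }

    return (s2 << 16) | s1;
}

The 16-way unrolling in the diff is this inner loop written out by hand so s1 and s2 stay in registers; dropping the parentheses around data is purely cosmetic.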


@@ -38,8 +38,9 @@ AARU_EXPORT void AARU_CALL adler32_slicing(uint16_t *sum1, uint16_t *sum2, const
#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
AARU_EXPORT SSSE3 void AARU_CALL adler32_ssse3(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len);
AARU_EXPORT AVX2 void AARU_CALL adler32_avx2(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len);
AARU_EXPORT TARGET_WITH_SSSE3 void AARU_CALL
adler32_ssse3(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len);
AARU_EXPORT TARGET_WITH_AVX2 void AARU_CALL adler32_avx2(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len);
#endif


@@ -33,16 +33,16 @@
#include "simd.h"
/**
* @brief Calculate Adler-32 checksum for a given data using AVX2 instructions.
* @brief Calculate Adler-32 checksum for a given data using TARGET_WITH_AVX2 instructions.
*
* This function calculates the Adler-32 checksum for a block of data using AVX2 vector instructions.
* This function calculates the Adler-32 checksum for a block of data using TARGET_WITH_AVX2 vector instructions.
*
* @param sum1 Pointer to the variable where the first 16-bit checksum value is stored.
* @param sum2 Pointer to the variable where the second 16-bit checksum value is stored.
* @param data Pointer to the data buffer.
* @param len Length of the data buffer in bytes.
*/
AARU_EXPORT AVX2 void AARU_CALL adler32_avx2(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len)
AARU_EXPORT TARGET_WITH_AVX2 void AARU_CALL adler32_avx2(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len)
{
uint32_t s1 = *sum1;
uint32_t s2 = *sum2;
@@ -103,7 +103,8 @@ AARU_EXPORT AVX2 void AARU_CALL adler32_avx2(uint16_t* sum1, uint16_t* sum2, con
__m256i v_ps = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, (s1 * n));
__m256i v_s2 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, s2);
__m256i v_s1 = _mm256_setzero_si256();
do {
do
{
/*
* Load 32 input bytes.
*/
@@ -122,7 +123,8 @@ AARU_EXPORT AVX2 void AARU_CALL adler32_avx2(uint16_t* sum1, uint16_t* sum2, con
v_s2 = _mm256_add_epi32(v_s2, _mm256_madd_epi16(mad, ones));
data += BLOCK_SIZE;
} while(--n);
}
while(--n);
__m128i sum = _mm_add_epi32(_mm256_castsi256_si128(v_s1), _mm256_extracti128_si256(v_s1, 1));
__m128i hi = _mm_unpackhi_epi64(sum, sum);
@@ -171,7 +173,8 @@ AARU_EXPORT AVX2 void AARU_CALL adler32_avx2(uint16_t* sum1, uint16_t* sum2, con
s2 += (s1 += *data++);
len -= 16;
}
while(len--) { s2 += (s1 += *data++); }
while(len--)
{ s2 += (s1 += *data++); }
if(s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
s2 %= ADLER_MODULE;
}
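The AVX2 path above (and the SSSE3 one further down) does not walk the bytes serially. It relies on a per-block identity: over a block of B bytes d[0..B-1], s1 grows by the plain byte sum, while s2 grows by B*s1 plus a weighted sum with weights B, B-1, ..., 1, which is what the maddubs/madd tap constants compute. A small scalar check of that identity, assuming B = 32 to match BLOCK_SIZE; purely illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Check: s1' = s1 + sum(d[i]); s2' = s2 + B*s1 + sum((B - i) * d[i]). */
int main(void)
{
    enum { B = 32 };
    uint8_t  d[B];
    uint32_t s1 = 1, s2 = 0;

    for(int i = 0; i < B; i++) d[i] = (uint8_t)(i * 37 + 5);

    /* Serial reference, exactly what the scalar loop does. */
    uint32_t r1 = s1, r2 = s2;
    for(int i = 0; i < B; i++)
    {
        r1 += d[i];
        r2 += r1;
    }

    /* Block form, what one vector iteration accumulates. */
    uint32_t byte_sum = 0, weighted = 0;
    for(int i = 0; i < B; i++)
    {
        byte_sum += d[i];
        weighted += (uint32_t)(B - i) * d[i];
    }

    assert(r1 == s1 + byte_sum);
    assert(r2 == s2 + B * s1 + weighted);
    printf("block identity holds\n");
    return 0;
}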


@@ -48,7 +48,7 @@
* @param data Pointer to the data buffer.
* @param len Length of the data buffer in bytes.
*/
TARGET_WITH_SIMD void adler32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32_t len)
TARGET_WITH_NEON void adler32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32_t len)
{
/*
* Split Adler-32 into component sums.
@@ -117,7 +117,8 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t
v_column_sum_3 = vaddw_u8(v_column_sum_3, vget_low_u8(bytes2));
v_column_sum_4 = vaddw_u8(v_column_sum_4, vget_high_u8(bytes2));
data += BLOCK_SIZE;
} while (--n);
}
while(--n);
v_s2 = vshlq_n_u32(v_s2, 5);
/*
* Multiply-add bytes by [ 32, 31, 30, ... ] for s2.
@@ -155,9 +156,9 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t
/*
* Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
*/
uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1));
uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2));
uint32x2_t s1s2 = vpadd_u32(sum1, sum2);
uint32x2_t t_s1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1));
uint32x2_t t_s2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2));
uint32x2_t s1s2 = vpadd_u32(t_s1, t_s2);
s1 += vget_lane_u32(s1s2, 0);
s2 += vget_lane_u32(s1s2, 1);
/*


@@ -41,16 +41,17 @@
/**
* @brief Calculate Adler-32 checksum for a given data using SSSE3 instructions.
* @brief Calculate Adler-32 checksum for a given data using TARGET_WITH_SSSE3 instructions.
*
* This function calculates the Adler-32 checksum for a block of data using SSSE3 vector instructions.
* This function calculates the Adler-32 checksum for a block of data using TARGET_WITH_SSSE3 vector instructions.
*
* @param sum1 Pointer to the variable where the first 16-bit checksum value is stored.
* @param sum2 Pointer to the variable where the second 16-bit checksum value is stored.
* @param data Pointer to the data buffer.
* @param len Length of the data buffer in bytes.
*/
AARU_EXPORT SSSE3 void AARU_CALL adler32_ssse3(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len)
AARU_EXPORT TARGET_WITH_SSSE3 void AARU_CALL
adler32_ssse3(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len)
{
uint32_t s1 = *sum1;
uint32_t s2 = *sum2;
@@ -77,7 +78,8 @@ AARU_EXPORT SSSE3 void AARU_CALL adler32_ssse3(uint16_t* sum1, uint16_t* sum2, c
__m128i v_ps = _mm_set_epi32(0, 0, 0, s1 * n);
__m128i v_s2 = _mm_set_epi32(0, 0, 0, s2);
__m128i v_s1 = _mm_set_epi32(0, 0, 0, 0);
do {
do
{
/*
* Load 32 input bytes.
*/
@@ -98,7 +100,8 @@ AARU_EXPORT SSSE3 void AARU_CALL adler32_ssse3(uint16_t* sum1, uint16_t* sum2, c
const __m128i mad2 = _mm_maddubs_epi16(bytes2, tap2);
v_s2 = _mm_add_epi32(v_s2, _mm_madd_epi16(mad2, ones));
data += BLOCK_SIZE;
} while(--n);
}
while(--n);
v_s2 = _mm_add_epi32(v_s2, _mm_slli_epi32(v_ps, 5));
/*
* Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
@@ -144,7 +147,8 @@ AARU_EXPORT SSSE3 void AARU_CALL adler32_ssse3(uint16_t* sum1, uint16_t* sum2, c
s2 += (s1 += *data++);
len -= 16;
}
while(len--) { s2 += (s1 += *data++); }
while(len--)
{ s2 += (s1 += *data++); }
if(s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
s2 %= ADLER_MODULE;
}


@@ -33,15 +33,17 @@
#include "simd.h"
#if !defined(__MINGW32__) && !defined(_MSC_FULL_VER) && (!defined(__ANDROID__) || !defined(__arm__))
TARGET_WITH_CRYPTO static uint64x2_t sse2neon_vmull_p64_crypto(uint64x1_t _a, uint64x1_t _b)
{
poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
return vreinterpretq_u64_p128(vmull_p64(a, b));
}
#endif
TARGET_WITH_SIMD uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
TARGET_WITH_NEON uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
#if !defined(__MINGW32__) && !defined(_MSC_FULL_VER) && (!defined(__ANDROID__) || !defined(__arm__))
// Wraps vmull_p64
@@ -136,7 +138,7 @@ TARGET_WITH_SIMD uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
return vreinterpretq_u64_u8(r);
}
TARGET_WITH_SIMD uint64x2_t mm_shuffle_epi8(uint64x2_t a, uint64x2_t b)
TARGET_WITH_NEON uint64x2_t mm_shuffle_epi8(uint64x2_t a, uint64x2_t b)
{
uint8x16_t tbl = vreinterpretq_u8_u64(a); // input a
uint8x16_t idx = vreinterpretq_u8_u64(b); // input b
@@ -151,13 +153,13 @@ TARGET_WITH_SIMD uint64x2_t mm_shuffle_epi8(uint64x2_t a, uint64x2_t b)
#endif
}
TARGET_WITH_SIMD uint64x2_t mm_srli_si128(uint64x2_t a, int imm)
TARGET_WITH_NEON uint64x2_t mm_srli_si128(uint64x2_t a, int imm)
{
uint8x16_t tmp[2] = {vreinterpretq_u8_u64(a), vdupq_n_u8(0)};
return vreinterpretq_u64_u8(vld1q_u8(((uint8_t const *)tmp) + imm));
}
TARGET_WITH_SIMD uint64x2_t mm_slli_si128(uint64x2_t a, int imm)
TARGET_WITH_NEON uint64x2_t mm_slli_si128(uint64x2_t a, int imm)
{
uint8x16_t tmp[2] = {vdupq_n_u8(0), vreinterpretq_u8_u64(a)};
return vreinterpretq_u64_u8(vld1q_u8(((uint8_t const *)tmp) + (16 - imm)));
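sse2neon_vmull_p64 above gives the NEON ports a 64x64 -> 128-bit carry-less multiply, the counterpart of x86 PCLMULQDQ, using the crypto extension's vmull_p64 where available and a bit-twiddling fallback otherwise. For reference, this is the operation being provided, written as a plain C sketch (name and shape are illustrative, not the library's):

#include <stdint.h>

/* 64x64 -> 128-bit carry-less multiply, bit by bit. Additions between
 * partial products are XORs, so no carries propagate between bit positions. */
static void clmul64_soft(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
    uint64_t rl = 0, rh = 0;

    for(int i = 0; i < 64; i++)
    {
        if((b >> i) & 1)
        {
            rl ^= a << i;
            rh ^= i ? a >> (64 - i) : 0; /* bits that spill into the high half */
        }
    }

    *lo = rl;
    *hi = rh;
}

The absence of carries is what makes CRC folding with these multiplies work: a CRC is a remainder in GF(2)[x], and this is multiplication in that ring.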


@@ -22,10 +22,10 @@
#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
TARGET_WITH_CRYPTO static uint64x2_t sse2neon_vmull_p64_crypto(uint64x1_t _a, uint64x1_t _b);
TARGET_WITH_SIMD uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b);
TARGET_WITH_SIMD uint64x2_t mm_shuffle_epi8(uint64x2_t a, uint64x2_t b);
TARGET_WITH_SIMD uint64x2_t mm_srli_si128(uint64x2_t a, int imm);
TARGET_WITH_SIMD uint64x2_t mm_slli_si128(uint64x2_t a, int imm);
TARGET_WITH_NEON uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b);
TARGET_WITH_NEON uint64x2_t mm_shuffle_epi8(uint64x2_t a, uint64x2_t b);
TARGET_WITH_NEON uint64x2_t mm_srli_si128(uint64x2_t a, int imm);
TARGET_WITH_NEON uint64x2_t mm_slli_si128(uint64x2_t a, int imm);
#endif


@@ -66,7 +66,7 @@ AARU_EXPORT int AARU_CALL crc16_update(crc16_ctx* ctx, const uint8_t* data, uint
uint16_t crc;
const uint32_t *current;
const uint8_t* current_char = (const uint8_t*)data;
const uint8_t *current_char = data;
const size_t unroll = 4;
const size_t bytes_at_once = 8 * unroll;
uintptr_t unaligned_length = (4 - (((uintptr_t)current_char) & 3)) & 3;
@@ -89,6 +89,7 @@ AARU_EXPORT int AARU_CALL crc16_update(crc16_ctx* ctx, const uint8_t* data, uint
{
uint32_t one = *current++ ^ crc;
uint32_t two = *current++;
// TODO: Big endian!
crc = crc16_table[0][(two >> 24) & 0xFF] ^ crc16_table[1][(two >> 16) & 0xFF] ^
crc16_table[2][(two >> 8) & 0xFF] ^ crc16_table[3][two & 0xFF] ^ crc16_table[4][(one >> 24) & 0xFF] ^
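For context on the loop above (and its big-endian TODO): slicing-by-8 extends the classic one-table, one-byte-per-step CRC update, and every slicing variant has to agree with that simple loop. A minimal byte-at-a-time sketch for a reflected (LSB-first) CRC-16, assuming a 256-entry table built with the same convention as crc16_table[0]:

#include <stddef.h>
#include <stdint.h>

/* One byte per iteration; the slicing-by-8 loop above must match this. */
static uint16_t crc16_bytewise(uint16_t crc, const uint8_t *data, size_t len,
                               const uint16_t table[256])
{
    while(len--)
        crc = (uint16_t)((crc >> 8) ^ table[(crc ^ *data++) & 0xFF]);

    return crc;
}

The additional tables are what let the unrolled loop fold whole 32-bit words per iteration instead of single bytes; those word loads are also why byte order matters there, hence the TODO.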


@@ -65,7 +65,7 @@ AARU_EXPORT int AARU_CALL crc16_ccitt_update(crc16_ccitt_ctx* ctx, const uint8_t
if(!ctx || !data) return -1;
uint16_t crc;
const uint8_t* current_char = (const uint8_t*)data;
const uint8_t *current_char = data;
const size_t unroll = 4;
const size_t bytes_at_once = 8 * unroll;
uintptr_t unaligned_length = (4 - (((uintptr_t)current_char) & 3)) & 3;


@@ -177,6 +177,7 @@ const uint16_t crc16_ccitt_table[8][256] = {
0x7039, 0x37EA, 0xFF9F, 0xB84C, 0x7F54, 0x3887, 0xF0F2, 0xB721, 0x6EE3, 0x2930, 0xE145, 0xA696, 0x618E, 0x265D,
0xEE28, 0xA9FB, 0x4D8D, 0x0A5E, 0xC22B, 0x85F8, 0x42E0, 0x0533, 0xCD46, 0x8A95, 0x5357, 0x1484, 0xDCF1, 0x9B22,
0x5C3A, 0x1BE9, 0xD39C, 0x944F}};
AARU_EXPORT crc16_ccitt_ctx *AARU_CALL crc16_ccitt_init();
AARU_EXPORT int AARU_CALL crc16_ccitt_update(crc16_ccitt_ctx *ctx, const uint8_t *data, uint32_t len);
AARU_EXPORT int AARU_CALL crc16_ccitt_final(crc16_ccitt_ctx *ctx, uint16_t *crc);


@@ -108,7 +108,7 @@ AARU_EXPORT void AARU_CALL crc32_slicing(uint32_t* previous_crc, const uint8_t*
// http://sourceforge.net/projects/slicing-by-8/
uint32_t c;
const uint32_t *current;
const uint8_t* current_char = (const uint8_t*)data;
const uint8_t *current_char = data;
const size_t unroll = 4;
const size_t bytes_at_once = 8 * unroll;
uintptr_t unaligned_length = (4 - (((uintptr_t)current_char) & 3)) & 3;


@@ -270,7 +270,7 @@ AARU_EXPORT void AARU_CALL crc32_slicing(uint32_t* previous_crc, const uin
#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
AARU_EXPORT CLMUL uint32_t AARU_CALL crc32_clmul(uint32_t previous_crc, const uint8_t* data, long len);
AARU_EXPORT TARGET_WITH_CLMUL uint32_t AARU_CALL crc32_clmul(uint32_t previous_crc, const uint8_t* data, long len);
#endif
#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
@@ -279,7 +279,7 @@ AARU_EXPORT TARGET_ARMV8_WITH_CRC uint32_t AARU_CALL armv8_crc32_little(uint32_t
const uint8_t* data,
uint32_t len);
#endif
AARU_EXPORT TARGET_WITH_SIMD uint32_t AARU_CALL crc32_vmull(uint32_t previous_crc, const uint8_t* data, long len);
AARU_EXPORT TARGET_WITH_NEON uint32_t AARU_CALL crc32_vmull(uint32_t previous_crc, const uint8_t* data, long len);
#endif
#endif // AARU_CHECKSUMS_NATIVE_CRC32_H


@@ -54,7 +54,7 @@
*/
TARGET_ARMV8_WITH_CRC uint32_t armv8_crc32_little(uint32_t previous_crc, const uint8_t *data, uint32_t len)
{
uint32_t c = (uint32_t)previous_crc;
uint32_t c = previous_crc;
#if defined(__aarch64__) || defined(_M_ARM64)
while(len && ((uintptr_t)data & 7))
@@ -110,7 +110,9 @@ TARGET_ARMV8_WITH_CRC uint32_t armv8_crc32_little(uint32_t previous_crc, const u
data = (const uint8_t *)buf4;
#endif
while(len--) { c = __crc32b(c, *data++); }
while(len--)
{ c = __crc32b(c, *data++); }
return c;
}
#endif
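armv8_crc32_little above uses the ARMv8 CRC32 extension directly. The shape of the routine is: consume single bytes until the pointer is 8-byte aligned, feed 64-bit words to the hardware instruction, then finish the tail with the reformatted while(len--) byte loop. A self-contained sketch of that pattern with the ACLE intrinsics; the pre/post bit inversion is folded in here to make the sketch complete, whereas the function in the diff appears to leave that to its caller:

#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
#include <arm_acle.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32_armv8_sketch(uint32_t crc, const uint8_t *data, size_t len)
{
    crc = ~crc;

    while(len && ((uintptr_t)data & 7)) /* reach 8-byte alignment */
    {
        crc = __crc32b(crc, *data++);
        len--;
    }

    while(len >= 8) /* one instruction per 8 bytes */
    {
        uint64_t word;
        memcpy(&word, data, 8);
        crc = __crc32d(crc, word);
        data += 8;
        len  -= 8;
    }

    while(len--) crc = __crc32b(crc, *data++); /* byte tail */

    return ~crc;
}
#endif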


@@ -34,7 +34,7 @@
#include "crc32.h"
#include "crc32_simd.h"
CLMUL static void fold_1(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2, __m128i* xmm_crc3)
TARGET_WITH_CLMUL static void fold_1(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3)
{
const __m128i xmm_fold4 = _mm_set_epi32(0x00000001, 0x54442bd4, 0x00000001, 0xc6e41596);
@@ -56,7 +56,7 @@ CLMUL static void fold_1(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2
*xmm_crc3 = _mm_castps_si128(ps_res);
}
CLMUL static void fold_2(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2, __m128i* xmm_crc3)
TARGET_WITH_CLMUL static void fold_2(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3)
{
const __m128i xmm_fold4 = _mm_set_epi32(0x00000001, 0x54442bd4, 0x00000001, 0xc6e41596);
@@ -86,7 +86,7 @@ CLMUL static void fold_2(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2
*xmm_crc3 = _mm_castps_si128(ps_res31);
}
CLMUL static void fold_3(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2, __m128i* xmm_crc3)
TARGET_WITH_CLMUL static void fold_3(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3)
{
const __m128i xmm_fold4 = _mm_set_epi32(0x00000001, 0x54442bd4, 0x00000001, 0xc6e41596);
@@ -122,7 +122,7 @@ CLMUL static void fold_3(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2
*xmm_crc3 = _mm_castps_si128(ps_res32);
}
CLMUL static void fold_4(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2, __m128i* xmm_crc3)
TARGET_WITH_CLMUL static void fold_4(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3)
{
const __m128i xmm_fold4 = _mm_set_epi32(0x00000001, 0x54442bd4, 0x00000001, 0xc6e41596);
@@ -166,7 +166,7 @@ CLMUL static void fold_4(__m128i* xmm_crc0, __m128i* xmm_crc1, __m128i* xmm_crc2
*xmm_crc3 = _mm_castps_si128(ps_res3);
}
CLMUL static void partial_fold(const size_t len,
TARGET_WITH_CLMUL static void partial_fold(const size_t len,
__m128i *xmm_crc0,
__m128i *xmm_crc1,
__m128i *xmm_crc2,
@@ -224,7 +224,7 @@ CLMUL static void partial_fold(const size_t len,
#define XOR_INITIAL(where) ONCE(where = _mm_xor_si128(where, xmm_initial))
/**
* @brief Calculate the CRC32 checksum using CLMUL instruction extension.
* @brief Calculate the CRC32 checksum using TARGET_WITH_CLMUL instruction extension.
*
* @param previous_crc The previously calculated CRC32 checksum.
* @param data Pointer to the input data buffer.
@@ -232,7 +232,7 @@ CLMUL static void partial_fold(const size_t len,
*
* @return The calculated CRC32 checksum.
*/
AARU_EXPORT CLMUL uint32_t AARU_CALL crc32_clmul(uint32_t previous_crc, const uint8_t* data, long len)
AARU_EXPORT TARGET_WITH_CLMUL uint32_t AARU_CALL crc32_clmul(uint32_t previous_crc, const uint8_t *data, long len)
{
unsigned long algn_diff;
__m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;
@@ -263,9 +263,12 @@ AARU_EXPORT CLMUL uint32_t AARU_CALL crc32_clmul(uint32_t previous_crc, const ui
uint32_t crc = ~previous_crc;
switch(len)
{
case 3: crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 2: crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 1: crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 3:
crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 2:
crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 1:
crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
}
return ~crc;
}
@@ -434,7 +437,7 @@ done:
/*
* could just as well write xmm_crc3[2], doing a movaps and truncating, but
* no real advantage - it's a tiny bit slower per call, while no additional CPUs
* would be supported by only requiring SSSE3 and CLMUL instead of SSE4.1 + CLMUL
* would be supported by only requiring TARGET_WITH_SSSE3 and TARGET_WITH_CLMUL instead of SSE4.1 + TARGET_WITH_CLMUL
*/
crc = _mm_extract_epi32(xmm_crc3, 2);
return ~crc;
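The CLMUL path above folds the message across four 128-bit lanes (fold_1..fold_4, plus partial_fold for odd sizes) and finishes with a Barrett reduction; the switch with the intentional fallthroughs covers 1-3 byte inputs through the ordinary table. When modifying code like this, a bit-by-bit reference is the easiest cross-check; a minimal one for the standard reflected CRC-32 polynomial 0xEDB88320, which these zlib-style fold constants correspond to (the polynomial is not stated in the diff, so treat it as an assumption):

#include <stddef.h>
#include <stdint.h>

/* Bit-at-a-time reflected CRC-32; table, CLMUL, VMULL and hardware paths
 * should all match this function byte for byte. */
static uint32_t crc32_bitwise(uint32_t crc, const uint8_t *data, size_t len)
{
    crc = ~crc;

    while(len--)
    {
        crc ^= *data++;
        for(int k = 0; k < 8; k++)
            crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320u : crc >> 1;
    }

    return ~crc;
}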


@@ -43,7 +43,7 @@
#define XOR_INITIAL(where) \
ONCE(where = vreinterpretq_u64_u32(veorq_u32(vreinterpretq_u32_u64(where), vreinterpretq_u32_u64(q_initial))))
TARGET_WITH_SIMD FORCE_INLINE void
TARGET_WITH_NEON FORCE_INLINE void
fold_1(uint64x2_t *q_crc0, uint64x2_t *q_crc1, uint64x2_t *q_crc2, uint64x2_t *q_crc3)
{
uint32_t ALIGNED_(16) data[4] = {0xc6e41596, 0x00000001, 0x54442bd4, 0x00000001};
@@ -67,7 +67,7 @@ TARGET_WITH_SIMD FORCE_INLINE void
*q_crc3 = vreinterpretq_u64_u32(ps_res);
}
TARGET_WITH_SIMD FORCE_INLINE void
TARGET_WITH_NEON FORCE_INLINE void
fold_2(uint64x2_t *q_crc0, uint64x2_t *q_crc1, uint64x2_t *q_crc2, uint64x2_t *q_crc3)
{
uint32_t ALIGNED_(16) data[4] = {0xc6e41596, 0x00000001, 0x54442bd4, 0x00000001};
@@ -99,7 +99,7 @@ TARGET_WITH_SIMD FORCE_INLINE void
*q_crc3 = vreinterpretq_u64_u32(ps_res31);
}
TARGET_WITH_SIMD FORCE_INLINE void
TARGET_WITH_NEON FORCE_INLINE void
fold_3(uint64x2_t *q_crc0, uint64x2_t *q_crc1, uint64x2_t *q_crc2, uint64x2_t *q_crc3)
{
uint32_t ALIGNED_(16) data[4] = {0xc6e41596, 0x00000001, 0x54442bd4, 0x00000001};
@@ -137,7 +137,7 @@ TARGET_WITH_SIMD FORCE_INLINE void
*q_crc3 = vreinterpretq_u64_u32(ps_res32);
}
TARGET_WITH_SIMD FORCE_INLINE void
TARGET_WITH_NEON FORCE_INLINE void
fold_4(uint64x2_t *q_crc0, uint64x2_t *q_crc1, uint64x2_t *q_crc2, uint64x2_t *q_crc3)
{
uint32_t ALIGNED_(16) data[4] = {0xc6e41596, 0x00000001, 0x54442bd4, 0x00000001};
@@ -184,7 +184,7 @@ TARGET_WITH_SIMD FORCE_INLINE void
*q_crc3 = vreinterpretq_u64_u32(ps_res3);
}
TARGET_WITH_SIMD FORCE_INLINE void partial_fold(const size_t len,
TARGET_WITH_NEON FORCE_INLINE void partial_fold(const size_t len,
uint64x2_t *q_crc0,
uint64x2_t *q_crc1,
uint64x2_t *q_crc2,
@@ -247,7 +247,7 @@ TARGET_WITH_SIMD FORCE_INLINE void partial_fold(const size_t len,
*
* @return The CRC-32 checksum of the given data.
*/
TARGET_WITH_SIMD uint32_t crc32_vmull(uint32_t previous_crc, const uint8_t* data, long len)
TARGET_WITH_NEON uint32_t crc32_vmull(uint32_t previous_crc, const uint8_t *data, long len)
{
unsigned long algn_diff;
uint64x2_t q_t0;
@@ -284,9 +284,12 @@ TARGET_WITH_SIMD uint32_t crc32_vmull(uint32_t previous_crc, const uint8_t* data
uint32_t crc = ~previous_crc;
switch(len)
{
case 3: crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 2: crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 1: crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 3:
crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 2:
crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
case 1:
crc = (crc >> 8) ^ crc32_table[0][(crc & 0xFF) ^ *data++];
}
return ~crc;
}
@@ -457,7 +460,7 @@ done:
/*
* could just as well write q_crc3[2], doing a movaps and truncating, but
* no real advantage - it's a tiny bit slower per call, while no additional CPUs
* would be supported by only requiring SSSE3 and CLMUL instead of SSE4.1 + CLMUL
* would be supported by only requiring TARGET_WITH_SSSE3 and TARGET_WITH_CLMUL instead of SSE4.1 + TARGET_WITH_CLMUL
*/
crc = vgetq_lane_u32(vreinterpretq_u32_u64(q_crc3), (2));
return ~crc;


@@ -34,7 +34,6 @@
*/
AARU_EXPORT crc64_ctx *AARU_CALL crc64_init(void)
{
int i, slice;
crc64_ctx *ctx = (crc64_ctx *)malloc(sizeof(crc64_ctx));
if(!ctx) return NULL;
@@ -98,7 +97,7 @@ AARU_EXPORT void AARU_CALL crc64_slicing(uint64_t* previous_crc, const uint8_t*
while((uintptr_t)(data) & 3)
{
c = crc64_table[0][*data++ ^ ((c)&0xFF)] ^ ((c) >> 8);
c = crc64_table[0][*data++ ^ (c & 0xFF)] ^ (c >> 8);
--len;
}
@@ -110,12 +109,12 @@ AARU_EXPORT void AARU_CALL crc64_slicing(uint64_t* previous_crc, const uint8_t*
const uint32_t tmp = c ^ *(const uint32_t *)(data);
data += 4;
c = crc64_table[3][((tmp)&0xFF)] ^ crc64_table[2][(((tmp) >> 8) & 0xFF)] ^ ((c) >> 32) ^
crc64_table[1][(((tmp) >> 16) & 0xFF)] ^ crc64_table[0][((tmp) >> 24)];
c = crc64_table[3][tmp & 0xFF] ^ crc64_table[2][(tmp >> 8) & 0xFF] ^ (c >> 32) ^
crc64_table[1][tmp >> 16 & 0xFF] ^ crc64_table[0][tmp >> 24];
}
}
while(len-- != 0) c = crc64_table[0][*data++ ^ ((c)&0xFF)] ^ ((c) >> 8);
while(len-- != 0) c = crc64_table[0][*data++ ^ (c & 0xFF)] ^ (c >> 8);
*previous_crc = c;
}
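crc64_init, whose unused locals this commit removes, fills crc64_table so the loop above can consume four bytes per round with tables [0..3]. A sketch of the standard construction, assuming the reflected CRC-64/XZ polynomial 0xC96C5795D7870F42; the polynomial the library actually uses is defined in its own headers, so treat the constant as illustrative:

#include <stdint.h>

#define CRC64_POLY_REFLECTED 0xC96C5795D7870F42ULL /* assumed, CRC-64/XZ */

/* table[0] is the plain bit-by-bit table; each higher slice extends the
 * previous one by a zero byte, enabling the four lookups per word above. */
static void crc64_build_tables(uint64_t table[4][256])
{
    for(int b = 0; b < 256; b++)
    {
        uint64_t c = (uint64_t)b;

        for(int k = 0; k < 8; k++)
            c = (c & 1) ? (c >> 1) ^ CRC64_POLY_REFLECTED : c >> 1;

        table[0][b] = c;
    }

    for(int s = 1; s < 4; s++)
        for(int b = 0; b < 256; b++)
            table[s][b] = (table[s - 1][b] >> 8) ^ table[0][table[s - 1][b] & 0xFF];
}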


@@ -245,11 +245,11 @@ AARU_EXPORT void AARU_CALL crc64_slicing(uint64_t* previous_crc, const uin
#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
AARU_EXPORT CLMUL uint64_t AARU_CALL crc64_clmul(uint64_t crc, const uint8_t* data, long length);
AARU_EXPORT TARGET_WITH_CLMUL uint64_t AARU_CALL crc64_clmul(uint64_t crc, const uint8_t *data, long length);
#endif
#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
AARU_EXPORT TARGET_WITH_SIMD uint64_t AARU_CALL crc64_vmull(uint64_t previous_crc, const uint8_t* data, long len);
AARU_EXPORT TARGET_WITH_NEON uint64_t AARU_CALL crc64_vmull(uint64_t previous_crc, const uint8_t *data, long len);
#endif
#endif // AARU_CHECKSUMS_NATIVE_CRC64_H


@@ -14,7 +14,9 @@
#include <wmmintrin.h>
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include "library.h"
@@ -58,7 +60,7 @@ static const uint8_t shuffleMasks[] = {
0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80,
};
CLMUL static void shiftRight128(__m128i in, size_t n, __m128i* outLeft, __m128i* outRight)
TARGET_WITH_CLMUL static void shiftRight128(__m128i in, size_t n, __m128i *outLeft, __m128i *outRight)
{
const __m128i maskA = _mm_loadu_si128((const __m128i *)(shuffleMasks + (16 - n)));
const __m128i maskB = _mm_xor_si128(maskA, _mm_cmpeq_epi8(_mm_setzero_si128(), _mm_setzero_si128()));
@@ -67,13 +69,13 @@ CLMUL static void shiftRight128(__m128i in, size_t n, __m128i* outLeft, __m128i*
*outRight = _mm_shuffle_epi8(in, maskA);
}
CLMUL static __m128i fold(__m128i in, __m128i foldConstants)
TARGET_WITH_CLMUL static __m128i fold(__m128i in, __m128i foldConstants)
{
return _mm_xor_si128(_mm_clmulepi64_si128(in, foldConstants, 0x00), _mm_clmulepi64_si128(in, foldConstants, 0x11));
}
/**
* @brief Calculate the CRC-64 checksum using CLMUL instruction extension.
* @brief Calculate the CRC-64 checksum using TARGET_WITH_CLMUL instruction extension.
*
* @param previous_crc The previously calculated CRC-64 checksum.
* @param data Pointer to the input data buffer.
@@ -81,7 +83,7 @@ CLMUL static __m128i fold(__m128i in, __m128i foldConstants)
*
* @return The calculated CRC-64 checksum.
*/
AARU_EXPORT CLMUL uint64_t AARU_CALL crc64_clmul(uint64_t crc, const uint8_t* data, long length)
AARU_EXPORT TARGET_WITH_CLMUL uint64_t AARU_CALL crc64_clmul(uint64_t crc, const uint8_t *data, long length)
{
const uint64_t k1 = 0xe05dd497ca393ae4; // bitReflect(expMod65(128 + 64, poly, 1)) << 1;
const uint64_t k2 = 0xdabe95afc7875f40; // bitReflect(expMod65(128, poly, 1)) << 1;
@@ -177,7 +179,8 @@ AARU_EXPORT CLMUL uint64_t AARU_CALL crc64_clmul(uint64_t crc, const uint8_t* da
}
__m128i P;
if(length == 16) { P = _mm_xor_si128(accumulator, _mm_load_si128(alignedData)); }
if(length == 16)
{ P = _mm_xor_si128(accumulator, _mm_load_si128(alignedData)); }
else
{
const __m128i end0 = _mm_xor_si128(accumulator, _mm_load_si128(alignedData));
@@ -196,7 +199,9 @@ AARU_EXPORT CLMUL uint64_t AARU_CALL crc64_clmul(uint64_t crc, const uint8_t* da
// Final Barrett reduction
const __m128i T1 = _mm_clmulepi64_si128(R, foldConstants2, 0x00);
const __m128i T2 =
_mm_xor_si128(_mm_xor_si128(_mm_clmulepi64_si128(T1, foldConstants2, 0x10), _mm_slli_si128(T1, 8)), R);
_mm_xor_si128(
_mm_xor_si128(_mm_clmulepi64_si128(T1, foldConstants2, 0x10), _mm_slli_si128(T1, 8)),
R);
#if defined(_WIN64)
return ~_mm_extract_epi64(T2, 1);


@@ -21,22 +21,32 @@ static const uint8_t shuffleMasks[] = {
0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80,
};
TARGET_WITH_SIMD FORCE_INLINE void shiftRight128(uint64x2_t in, size_t n, uint64x2_t* outLeft, uint64x2_t* outRight)
TARGET_WITH_NEON FORCE_INLINE void shiftRight128(uint64x2_t in, size_t n, uint64x2_t *outLeft, uint64x2_t *outRight)
{
const uint64x2_t maskA =
vreinterpretq_u64_u32(vld1q_u32((const uint32_t*)(const uint64x2_t*)(shuffleMasks + (16 - n))));
vreinterpretq_u64_u32(
vld1q_u32((const uint32_t *)(const uint64x2_t *)(shuffleMasks + (16 - n))));
uint64x2_t b = vreinterpretq_u64_u8(vceqq_u8(vreinterpretq_u8_u64(vreinterpretq_u64_u32(vdupq_n_u32(0))),
vreinterpretq_u8_u64(vreinterpretq_u64_u32(vdupq_n_u32(0)))));
vreinterpretq_u8_u64(
vreinterpretq_u64_u32(vdupq_n_u32(0)))));
const uint64x2_t maskB = vreinterpretq_u64_u32(veorq_u32(vreinterpretq_u32_u64(maskA), vreinterpretq_u32_u64(b)));
*outLeft = mm_shuffle_epi8(in, maskB);
*outRight = mm_shuffle_epi8(in, maskA);
}
TARGET_WITH_SIMD FORCE_INLINE uint64x2_t fold(uint64x2_t in, uint64x2_t foldConstants)
TARGET_WITH_NEON FORCE_INLINE uint64x2_t
fold (uint64x2_t
in,
uint64x2_t foldConstants
)
{
return veorq_u64(sse2neon_vmull_p64(vget_low_u64(in), vget_low_u64(foldConstants)),
sse2neon_vmull_p64(vget_high_u64(in), vget_high_u64(foldConstants)));
return
veorq_u64(sse2neon_vmull_p64(vget_low_u64(in), vget_low_u64(foldConstants)),
sse2neon_vmull_p64(vget_high_u64(in), vget_high_u64(foldConstants))
);
}
/**
@@ -53,7 +63,7 @@ TARGET_WITH_SIMD FORCE_INLINE uint64x2_t fold(uint64x2_t in, uint64x2_t foldCons
*
* @return The CRC-64 checksum of the given data.
*/
AARU_EXPORT TARGET_WITH_SIMD uint64_t AARU_CALL crc64_vmull(uint64_t previous_crc, const uint8_t* data, long len)
AARU_EXPORT TARGET_WITH_NEON uint64_t AARU_CALL crc64_vmull(uint64_t previous_crc, const uint8_t *data, long len)
{
const uint64_t k1 = 0xe05dd497ca393ae4; // bitReflect(expMod65(128 + 64, poly, 1)) << 1;
const uint64_t k2 = 0xdabe95afc7875f40; // bitReflect(expMod65(128, poly, 1)) << 1;
@@ -75,12 +85,14 @@ AARU_EXPORT TARGET_WITH_SIMD uint64_t AARU_CALL crc64_vmull(uint64_t previous_cr
const size_t alignedLength = alignedEnd - alignedData;
const uint64x2_t leadInMask =
vreinterpretq_u64_u32(vld1q_u32((const uint32_t*)(const uint64x2_t*)(shuffleMasks + (16 - leadInSize))));
vreinterpretq_u64_u32(vld1q_u32(
(const uint32_t *)(const uint64x2_t *)(shuffleMasks + (16 - leadInSize))));
uint64x2_t a = vreinterpretq_u64_u32(vdupq_n_u32(0));
uint64x2_t b = vreinterpretq_u64_u32(
vld1q_u32((const uint32_t *)alignedData)); // Use a signed shift right to create a mask with the sign bit
const uint64x2_t data0 =
vreinterpretq_u64_u8(vbslq_u8(vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_u64(leadInMask), 7)),
vreinterpretq_u64_u8(
vbslq_u8(vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_u64(leadInMask), 7)),
vreinterpretq_u8_u64(b),
vreinterpretq_u8_u64(a)));
@@ -157,7 +169,8 @@ AARU_EXPORT TARGET_WITH_SIMD uint64_t AARU_CALL crc64_vmull(uint64_t previous_cr
else
{
const uint64x2_t end0 =
veorq_u64(accumulator, vreinterpretq_u64_u32(vld1q_u32((const uint32_t*)alignedData)));
veorq_u64(accumulator,
vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)alignedData)));
const uint64x2_t end1 = vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)(alignedData + 1)));
uint64x2_t A, B, C, D;


@@ -103,33 +103,35 @@ AARU_EXPORT int AARU_CALL fletcher16_update(fletcher16_ctx* ctx, const uint8_t*
{
len -= NMAX;
n = NMAX / 11; /* NMAX is divisible by 11 */
do {
sum1 += (data)[0];
do
{
sum1 += data[0];
sum2 += sum1;
sum1 += (data)[0 + 1];
sum1 += data[0 + 1];
sum2 += sum1;
sum1 += (data)[0 + 2];
sum1 += data[0 + 2];
sum2 += sum1;
sum1 += (data)[0 + 2 + 1];
sum1 += data[0 + 2 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4];
sum1 += data[0 + 4];
sum2 += sum1;
sum1 += (data)[0 + 4 + 1];
sum1 += data[0 + 4 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2];
sum1 += data[0 + 4 + 2];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2 + 1];
sum1 += data[0 + 4 + 2 + 1];
sum2 += sum1;
sum1 += (data)[8];
sum1 += data[8];
sum2 += sum1;
sum1 += (data)[8 + 1];
sum1 += data[8 + 1];
sum2 += sum1;
sum1 += (data)[8 + 2];
sum1 += data[8 + 2];
sum2 += sum1;
/* 11 sums unrolled */
data += 11;
} while(--n);
}
while(--n);
sum1 %= FLETCHER16_MODULE;
sum2 %= FLETCHER16_MODULE;
}
@@ -140,27 +142,27 @@ AARU_EXPORT int AARU_CALL fletcher16_update(fletcher16_ctx* ctx, const uint8_t*
while(len >= 11)
{
len -= 11;
sum1 += (data)[0];
sum1 += data[0];
sum2 += sum1;
sum1 += (data)[0 + 1];
sum1 += data[0 + 1];
sum2 += sum1;
sum1 += (data)[0 + 2];
sum1 += data[0 + 2];
sum2 += sum1;
sum1 += (data)[0 + 2 + 1];
sum1 += data[0 + 2 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4];
sum1 += data[0 + 4];
sum2 += sum1;
sum1 += (data)[0 + 4 + 1];
sum1 += data[0 + 4 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2];
sum1 += data[0 + 4 + 2];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2 + 1];
sum1 += data[0 + 4 + 2 + 1];
sum2 += sum1;
sum1 += (data)[8];
sum1 += data[8];
sum2 += sum1;
sum1 += (data)[8 + 1];
sum1 += data[8 + 1];
sum2 += sum1;
sum1 += (data)[8 + 2];
sum1 += data[8 + 2];
sum2 += sum1;
data += 11;


@@ -130,43 +130,45 @@ AARU_EXPORT int AARU_CALL fletcher32_update(fletcher32_ctx* ctx, const uint8_t*
{
len -= NMAX;
n = NMAX / 16; /* NMAX is divisible by 16 */
do {
sum1 += (data)[0];
do
{
sum1 += data[0];
sum2 += sum1;
sum1 += (data)[0 + 1];
sum1 += data[0 + 1];
sum2 += sum1;
sum1 += (data)[0 + 2];
sum1 += data[0 + 2];
sum2 += sum1;
sum1 += (data)[0 + 2 + 1];
sum1 += data[0 + 2 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4];
sum1 += data[0 + 4];
sum2 += sum1;
sum1 += (data)[0 + 4 + 1];
sum1 += data[0 + 4 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2];
sum1 += data[0 + 4 + 2];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2 + 1];
sum1 += data[0 + 4 + 2 + 1];
sum2 += sum1;
sum1 += (data)[8];
sum1 += data[8];
sum2 += sum1;
sum1 += (data)[8 + 1];
sum1 += data[8 + 1];
sum2 += sum1;
sum1 += (data)[8 + 2];
sum1 += data[8 + 2];
sum2 += sum1;
sum1 += (data)[8 + 2 + 1];
sum1 += data[8 + 2 + 1];
sum2 += sum1;
sum1 += (data)[8 + 4];
sum1 += data[8 + 4];
sum2 += sum1;
sum1 += (data)[8 + 4 + 1];
sum1 += data[8 + 4 + 1];
sum2 += sum1;
sum1 += (data)[8 + 4 + 2];
sum1 += data[8 + 4 + 2];
sum2 += sum1;
sum1 += (data)[8 + 4 + 2 + 1];
sum1 += data[8 + 4 + 2 + 1];
sum2 += sum1;
/* 16 sums unrolled */
data += 16;
} while(--n);
}
while(--n);
sum1 %= FLETCHER32_MODULE;
sum2 %= FLETCHER32_MODULE;
}
@@ -177,37 +179,37 @@ AARU_EXPORT int AARU_CALL fletcher32_update(fletcher32_ctx* ctx, const uint8_t*
while(len >= 16)
{
len -= 16;
sum1 += (data)[0];
sum1 += data[0];
sum2 += sum1;
sum1 += (data)[0 + 1];
sum1 += data[0 + 1];
sum2 += sum1;
sum1 += (data)[0 + 2];
sum1 += data[0 + 2];
sum2 += sum1;
sum1 += (data)[0 + 2 + 1];
sum1 += data[0 + 2 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4];
sum1 += data[0 + 4];
sum2 += sum1;
sum1 += (data)[0 + 4 + 1];
sum1 += data[0 + 4 + 1];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2];
sum1 += data[0 + 4 + 2];
sum2 += sum1;
sum1 += (data)[0 + 4 + 2 + 1];
sum1 += data[0 + 4 + 2 + 1];
sum2 += sum1;
sum1 += (data)[8];
sum1 += data[8];
sum2 += sum1;
sum1 += (data)[8 + 1];
sum1 += data[8 + 1];
sum2 += sum1;
sum1 += (data)[8 + 2];
sum1 += data[8 + 2];
sum2 += sum1;
sum1 += (data)[8 + 2 + 1];
sum1 += data[8 + 2 + 1];
sum2 += sum1;
sum1 += (data)[8 + 4];
sum1 += data[8 + 4];
sum2 += sum1;
sum1 += (data)[8 + 4 + 1];
sum1 += data[8 + 4 + 1];
sum2 += sum1;
sum1 += (data)[8 + 4 + 2];
sum1 += data[8 + 4 + 2];
sum2 += sum1;
sum1 += (data)[8 + 4 + 2 + 1];
sum1 += data[8 + 4 + 2 + 1];
sum2 += sum1;
data += 16;


@@ -37,8 +37,8 @@ AARU_EXPORT void AARU_CALL fletcher32_free(fletcher32_ctx* ctx);
#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len);
AARU_EXPORT SSSE3 void AARU_CALL fletcher32_ssse3(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len);
AARU_EXPORT TARGET_WITH_AVX2 void AARU_CALL fletcher32_avx2(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len);
AARU_EXPORT TARGET_WITH_SSSE3 void AARU_CALL fletcher32_ssse3(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len);
#endif


@@ -42,7 +42,9 @@
* @param data Pointer to the data buffer.
* @param len Length of the data buffer in bytes.
*/
AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len) {
AARU_EXPORT TARGET_WITH_AVX2 void AARU_CALL
fletcher32_avx2(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len)
{
uint32_t s1 = *sum1;
uint32_t s2 = *sum2;
@@ -53,7 +55,8 @@ AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t *sum1, uint16_t *sum2,
long blocks = len / BLOCK_SIZE;
len -= blocks * BLOCK_SIZE;
while (blocks) {
while(blocks)
{
unsigned n = NMAX / BLOCK_SIZE; /* The NMAX constraint. */
if(n > blocks) n = (unsigned)blocks;
@@ -101,7 +104,8 @@ AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t *sum1, uint16_t *sum2,
__m256i v_ps = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, (s1 * n));
__m256i v_s2 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, s2);
__m256i v_s1 = _mm256_setzero_si256();
do {
do
{
/*
* Load 32 input bytes.
*/
@@ -120,7 +124,8 @@ AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t *sum1, uint16_t *sum2,
v_s2 = _mm256_add_epi32(v_s2, _mm256_madd_epi16(mad, ones));
data += BLOCK_SIZE;
} while (--n);
}
while(--n);
__m128i sum = _mm_add_epi32(_mm256_castsi256_si128(v_s1), _mm256_extracti128_si256(v_s1, 1));
__m128i hi = _mm_unpackhi_epi64(sum, sum);
@@ -147,8 +152,10 @@ AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t *sum1, uint16_t *sum2,
/*
* Handle leftover data.
*/
if (len) {
if (len >= 16) {
if(len)
{
if(len >= 16)
{
s2 += (s1 += *data++);
s2 += (s1 += *data++);
s2 += (s1 += *data++);
@@ -167,7 +174,8 @@ AARU_EXPORT AVX2 void AARU_CALL fletcher32_avx2(uint16_t *sum1, uint16_t *sum2,
s2 += (s1 += *data++);
len -= 16;
}
while (len--) { s2 += (s1 += *data++); }
while(len--)
{ s2 += (s1 += *data++); }
if(s1 >= FLETCHER32_MODULE) s1 -= FLETCHER32_MODULE;
s2 %= FLETCHER32_MODULE;
}


@@ -38,7 +38,7 @@
#include "fletcher32.h"
#include "simd.h"
TARGET_WITH_SIMD /***/
TARGET_WITH_NEON /***/
/**
* @brief Calculate Fletcher-32 checksum for a given data using NEON instructions.
@@ -50,7 +50,8 @@ TARGET_WITH_SIMD /***/
* @param data Pointer to the data buffer.
* @param len Length of the data buffer in bytes.
*/
void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32_t len) {
void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32_t len)
{
/*
* Split Fletcher-32 into component sums.
*/
@@ -59,8 +60,10 @@ void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32
/*
* Serially compute s1 & s2, until the data is 16-byte aligned.
*/
if ((uintptr_t) data & 15) {
while ((uintptr_t) data & 15) {
if((uintptr_t)data & 15)
{
while((uintptr_t)data & 15)
{
s2 += (s1 += *data++);
--len;
}
@@ -73,7 +76,8 @@ void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32
const unsigned BLOCK_SIZE = 1 << 5;
uint32_t blocks = len / BLOCK_SIZE;
len -= blocks * BLOCK_SIZE;
while (blocks) {
while(blocks)
{
unsigned n = NMAX / BLOCK_SIZE; /* The NMAX constraint. */
if(n > blocks) n = (unsigned)blocks;
blocks -= n;
@@ -92,7 +96,8 @@ void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32
uint16x8_t v_column_sum_2 = vdupq_n_u16(0);
uint16x8_t v_column_sum_3 = vdupq_n_u16(0);
uint16x8_t v_column_sum_4 = vdupq_n_u16(0);
do {
do
{
/*
* Load 32 input bytes.
*/
@@ -114,7 +119,8 @@ void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32
v_column_sum_3 = vaddw_u8(v_column_sum_3, vget_low_u8(bytes2));
v_column_sum_4 = vaddw_u8(v_column_sum_4, vget_high_u8(bytes2));
data += BLOCK_SIZE;
} while (--n);
}
while(--n);
v_s2 = vshlq_n_u32(v_s2, 5);
/*
* Multiply-add bytes by [ 32, 31, 30, ... ] for s2.
@@ -166,8 +172,10 @@ void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32
/*
* Handle leftover data.
*/
if (len) {
if (len >= 16) {
if(len)
{
if(len >= 16)
{
s2 += (s1 += *data++);
s2 += (s1 += *data++);
s2 += (s1 += *data++);
@@ -186,7 +194,8 @@ void fletcher32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32
s2 += (s1 += *data++);
len -= 16;
}
while (len--) { s2 += (s1 += *data++); }
while(len--)
{ s2 += (s1 += *data++); }
if(s1 >= FLETCHER32_MODULE) s1 -= FLETCHER32_MODULE;
s2 %= FLETCHER32_MODULE;
}


@@ -40,16 +40,17 @@
#include "fletcher32.h"
/**
* @brief Calculate Fletcher-32 checksum for a given data using SSSE3 instructions.
* @brief Calculate Fletcher-32 checksum for a given data using TARGET_WITH_SSSE3 instructions.
*
* This function calculates the Fletcher-32 checksum for a block of data using SSSE3 vector instructions.
* This function calculates the Fletcher-32 checksum for a block of data using TARGET_WITH_SSSE3 vector instructions.
*
* @param sum1 Pointer to the variable where the first 16-bit checksum value is stored.
* @param sum2 Pointer to the variable where the second 16-bit checksum value is stored.
* @param data Pointer to the data buffer.
* @param len Length of the data buffer in bytes.
*/
AARU_EXPORT SSSE3 void AARU_CALL fletcher32_ssse3(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, long len)
AARU_EXPORT TARGET_WITH_SSSE3 void AARU_CALL
fletcher32_ssse3(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, long len)
{
uint32_t s1 = *sum1;
uint32_t s2 = *sum2;
@@ -76,7 +77,8 @@ AARU_EXPORT SSSE3 void AARU_CALL fletcher32_ssse3(uint16_t* sum1, uint16_t* sum2
__m128i v_ps = _mm_set_epi32(0, 0, 0, s1 * n);
__m128i v_s2 = _mm_set_epi32(0, 0, 0, s2);
__m128i v_s1 = _mm_set_epi32(0, 0, 0, 0);
do {
do
{
/*
* Load 32 input bytes.
*/
@@ -97,7 +99,8 @@ AARU_EXPORT SSSE3 void AARU_CALL fletcher32_ssse3(uint16_t* sum1, uint16_t* sum2
const __m128i mad2 = _mm_maddubs_epi16(bytes2, tap2);
v_s2 = _mm_add_epi32(v_s2, _mm_madd_epi16(mad2, ones));
data += BLOCK_SIZE;
} while(--n);
}
while(--n);
v_s2 = _mm_add_epi32(v_s2, _mm_slli_epi32(v_ps, 5));
/*
* Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
@@ -143,7 +146,8 @@ AARU_EXPORT SSSE3 void AARU_CALL fletcher32_ssse3(uint16_t* sum1, uint16_t* sum2
s2 += (s1 += *data++);
len -= 16;
}
while(len--) { s2 += (s1 += *data++); }
while(len--)
{ s2 += (s1 += *data++); }
if(s1 >= FLETCHER32_MODULE) s1 -= FLETCHER32_MODULE;
s2 %= FLETCHER32_MODULE;
}


@@ -20,4 +20,5 @@
#include "library.h"
AARU_EXPORT uint64_t AARU_CALL get_acn_version() { return AARU_CHECKUMS_NATIVE_VERSION; }
AARU_EXPORT uint64_t AARU_CALL get_acn_version()
{ return AARU_CHECKUMS_NATIVE_VERSION; }

simd.c

@@ -123,15 +123,15 @@ static void cpuidex(int info, int count, unsigned* eax, unsigned* ebx, unsigned*
}
/**
* @brief Checks if the hardware supports the CLMUL instruction set.
* @brief Checks if the hardware supports the TARGET_WITH_CLMUL instruction set.
*
* The function checks if the system's CPU supports the CLMUL (Carry-Less Multiplication) instruction set.
* CLMUL is an extension to the x86 instruction set architecture and provides hardware acceleration for
* The function checks if the system's CPU supports the TARGET_WITH_CLMUL (Carry-Less Multiplication) instruction set.
* TARGET_WITH_CLMUL is an extension to the x86 instruction set architecture and provides hardware acceleration for
* carry-less multiplication operations.
*
* @return True if CLMUL instruction set is supported, False otherwise.
* @return True if TARGET_WITH_CLMUL instruction set is supported, False otherwise.
*
* @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#techs=CLMUL
* @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#techs=TARGET_WITH_CLMUL
* @see https://en.wikipedia.org/wiki/Carry-less_multiplication
*/
int have_clmul(void)
@@ -148,17 +148,17 @@ int have_clmul(void)
}
/**
* @brief Checks if the current processor supports SSSE3 instructions.
* @brief Checks if the current processor supports TARGET_WITH_SSSE3 instructions.
*
* The function detects whether the current processor supports SSSE3 instructions by
* checking the CPU feature flags. SSSE3 (Supplemental Streaming SIMD Extensions 3)
* The function detects whether the current processor supports TARGET_WITH_SSSE3 instructions by
* checking the CPU feature flags. TARGET_WITH_SSSE3 (Supplemental Streaming SIMD Extensions 3)
* is an extension to the x86 instruction set architecture that introduces
* additional SIMD instructions useful for multimedia and signal processing tasks.
*
* @return true if the current processor supports SSSE3 instructions, false otherwise.
* @return true if the current processor supports TARGET_WITH_SSSE3 instructions, false otherwise.
*
* @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#techs=SSSE3
* @see https://en.wikipedia.org/wiki/SSSE3
* @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#techs=TARGET_WITH_SSSE3
* @see https://en.wikipedia.org/wiki/TARGET_WITH_SSSE3
*/
int have_ssse3(void)
{
@@ -169,16 +169,16 @@ int have_ssse3(void)
}
/**
* @brief Checks if the current processor supports AVX2 instructions.
* @brief Checks if the current processor supports TARGET_WITH_AVX2 instructions.
*
* The function detects whether the current processor supports AVX2 instructions by
* checking the CPU feature flags. AVX2 (Advanced Vector Extensions 2) is an extension
* The function detects whether the current processor supports TARGET_WITH_AVX2 instructions by
* checking the CPU feature flags. TARGET_WITH_AVX2 (Advanced Vector Extensions 2) is an extension
* to the x86 instruction set architecture that introduces additional SIMD instructions
* useful for multimedia and signal processing tasks.
*
* @return true if the current processor supports AVX2 instructions, false otherwise.
* @return true if the current processor supports TARGET_WITH_AVX2 instructions, false otherwise.
*
* @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#techs=AVX2
* @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#techs=TARGET_WITH_AVX2
* @see https://en.wikipedia.org/wiki/Advanced_Vector_Extensions
*/
@@ -193,17 +193,24 @@ int have_avx2(void)
#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
#if defined(_WIN32)
#include <windows.h>
#include <processthreadsapi.h>
#elif defined(__APPLE__)
#include <sys/sysctl.h>
#else
#include <sys/auxv.h>
#endif
#endif
#if(defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)) && defined(__APPLE__)
/**
* @brief Checks if the current processor supports NEON instructions.
*
@@ -257,10 +264,13 @@ int have_crc32_apple()
*
* @return true if the current processor supports cryptographic instructions, false otherwise.
*/
int have_crypto_apple() { return 0; }
int have_crypto_apple()
{ return 0; }
#endif
#if defined(__aarch64__) || defined(_M_ARM64)
int have_neon(void)
{
return 1; // ARMv8-A made it mandatory
@@ -305,9 +315,11 @@ int have_arm_crypto(void)
return getauxval(AT_HWCAP) & HWCAP_AES;
#endif
}
#endif
#if defined(__arm__) || defined(_M_ARM)
/**
* @brief Checks if the current processor supports NEON instructions.
*
@@ -377,4 +389,5 @@ int have_arm_crypto(void)
return getauxval(AT_HWCAP2) & HWCAP2_AES;
#endif
}
#endif
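The have_clmul/have_ssse3/have_avx2 helpers documented above read CPUID feature bits, and the ARM builds fall back to getauxval, sysctl, or the Windows processor-feature API, as the new includes show. For reference, a minimal GCC/Clang sketch of the x86 side using the compiler's <cpuid.h> helpers; the library's own cpuidex wrapper differs in detail:

#if defined(__x86_64__) || defined(__i386__)
#include <cpuid.h>

/* CPUID leaf 1 ECX: bit 1 = PCLMULQDQ, bit 9 = SSSE3.
 * CPUID leaf 7 (subleaf 0) EBX: bit 5 = AVX2. */
static int cpu_has_clmul(void)
{
    unsigned eax, ebx, ecx, edx;
    if(!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 0;
    return (ecx >> 1) & 1;
}

static int cpu_has_ssse3(void)
{
    unsigned eax, ebx, ecx, edx;
    if(!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 0;
    return (ecx >> 9) & 1;
}

static int cpu_has_avx2(void)
{
    unsigned eax, ebx, ecx, edx;
    if(!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) return 0;
    /* A complete AVX2 check would also confirm OS YMM-state support
     * via OSXSAVE and XGETBV before trusting this bit. */
    return (ebx >> 5) & 1;
}
#endif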

simd.h

@@ -29,13 +29,13 @@
defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
#ifdef _MSC_VER
#define AVX2
#define SSSE3
#define CLMUL
#define TARGET_WITH_AVX2
#define TARGET_WITH_SSSE3
#define TARGET_WITH_CLMUL
#else
#define AVX2 __attribute__((target("avx2")))
#define SSSE3 __attribute__((target("ssse3")))
#define CLMUL __attribute__((target("pclmul,sse4.1")))
#define TARGET_WITH_AVX2 __attribute__((target("avx2")))
#define TARGET_WITH_SSSE3 __attribute__((target("ssse3")))
#define TARGET_WITH_CLMUL __attribute__((target("pclmul,sse4.1")))
#endif
AARU_EXPORT int have_clmul(void);
@@ -71,7 +71,7 @@ AARU_EXPORT int have_arm_crypto(void);
#define TARGET_ARMV8_WITH_CRC
#define TARGET_WITH_CRYPTO
#define TARGET_WITH_SIMD
#define TARGET_WITH_NEON
#else // _MSC_VER
@@ -89,7 +89,7 @@ AARU_EXPORT int have_arm_crypto(void);
#define TARGET_WITH_CRYPTO __attribute__((target("+crypto")))
#endif
#define TARGET_WITH_SIMD
#define TARGET_WITH_NEON
#else
#if (__ARM_ARCH >= 7 || defined (__ARM_ARCH_8A))
@@ -109,9 +109,9 @@ AARU_EXPORT int have_arm_crypto(void);
#endif
#ifdef __clang__
#define TARGET_WITH_SIMD __attribute__((target("neon")))
#define TARGET_WITH_NEON __attribute__((target("neon")))
#else
#define TARGET_WITH_SIMD __attribute__((target("fpu=neon")))
#define TARGET_WITH_NEON __attribute__((target("fpu=neon")))
#endif
#endif // __aarch64__ || _M_ARM64
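The renamed TARGET_WITH_* macros expand to GCC/Clang __attribute__((target(...))) and to nothing under MSVC, so a single translation unit can hold functions compiled for ISA extensions the base target lacks; the have_*() helpers then pick a variant at run time. A hedged, self-contained sketch of that dispatch pattern; the function names are illustrative and __builtin_cpu_supports stands in for the library's own detection:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sum_bytes_scalar(const uint8_t *p, size_t n)
{
    uint32_t s = 0;
    while(n--) s += *p++;
    return s;
}

#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* Only this function is compiled with SSSE3 enabled, mirroring how
 * TARGET_WITH_SSSE3 is applied per function in the headers above. */
__attribute__((target("ssse3"))) static uint32_t sum_bytes_ssse3(const uint8_t *p, size_t n)
{
    uint32_t s = 0; /* a real variant would use SSSE3 intrinsics here */
    while(n--) s += *p++;
    return s;
}
#endif

int main(void)
{
    const uint8_t buf[4] = {1, 2, 3, 4};

#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
    if(__builtin_cpu_supports("ssse3"))
    {
        printf("%u\n", sum_bytes_ssse3(buf, sizeof buf));
        return 0;
    }
#endif
    printf("%u\n", sum_bytes_scalar(buf, sizeof buf));
    return 0;
}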


@@ -96,7 +96,7 @@ AARU_EXPORT void AARU_CALL spamsum_free(spamsum_ctx* ctx)
#define SUM_HASH(c, h) (((h)*HASH_PRIME) ^ (c));
#define SSDEEP_BS(index) (MIN_BLOCKSIZE << (index))
AARU_LOCAL inline void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)
FORCE_INLINE void fuzzy_engine_step(spamsum_ctx *ctx, uint8_t c)
{
uint32_t i;
/* At each character we update the rolling hash and the normal hashes.
@@ -149,7 +149,7 @@ AARU_LOCAL inline void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)
}
}
AARU_LOCAL inline void roll_hash(spamsum_ctx* ctx, uint8_t c)
FORCE_INLINE void roll_hash(spamsum_ctx *ctx, uint8_t c)
{
ctx->roll.h2 -= ctx->roll.h1;
ctx->roll.h2 += ROLLING_WINDOW * c;
@@ -167,7 +167,7 @@ AARU_LOCAL inline void roll_hash(spamsum_ctx* ctx, uint8_t c)
ctx->roll.h3 ^= c;
}
AARU_LOCAL inline void fuzzy_try_reduce_blockhash(spamsum_ctx* ctx)
FORCE_INLINE void fuzzy_try_reduce_blockhash(spamsum_ctx *ctx)
{
// assert(ctx->bh_start < ctx->bh_end);
@@ -187,7 +187,7 @@ AARU_LOCAL inline void fuzzy_try_reduce_blockhash(spamsum_ctx* ctx)
++ctx->bh_start;
}
AARU_LOCAL inline void fuzzy_try_fork_blockhash(spamsum_ctx* ctx)
FORCE_INLINE void fuzzy_try_fork_blockhash(spamsum_ctx *ctx)
{
if(ctx->bh_end >= NUM_BLOCKHASHES) return;
@@ -297,8 +297,7 @@ AARU_EXPORT int AARU_CALL spamsum_final(spamsum_ctx* ctx, uint8_t* result)
++bi;
i = (int)ctx->bh[bi].d_len;
if(i <= remain)
;
if(i <= remain);
memcpy(result, ctx->bh[bi].digest, (size_t)i);
result += i;
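roll_hash and fuzzy_engine_step above implement spamsum's (ssdeep's) rolling hash: a hash over the last ROLLING_WINDOW bytes that can be updated in constant time per byte and is used to decide where piece boundaries fall. A standalone sketch of the usual formulation, assuming the conventional 7-byte window; the field names mirror the ctx->roll members visible in the diff:

#include <stdint.h>

#define ROLLING_WINDOW 7 /* assumed: the customary spamsum window size */

typedef struct /* zero-initialize before use */
{
    uint8_t  window[ROLLING_WINDOW];
    uint32_t h1; /* sum of the bytes currently in the window */
    uint32_t h2; /* weighted sum, newest byte weighted highest */
    uint32_t h3; /* shift/XOR mix of every byte seen */
    uint32_t n;  /* total bytes pushed */
} rolling_state;

/* O(1) per byte: subtract the oldest byte's contribution, add the new one. */
static void roll_hash_step(rolling_state *r, uint8_t c)
{
    r->h2 -= r->h1;
    r->h2 += ROLLING_WINDOW * (uint32_t)c;

    r->h1 += c;
    r->h1 -= r->window[r->n % ROLLING_WINDOW];

    r->window[r->n % ROLLING_WINDOW] = c;
    r->n++;

    r->h3 = (r->h3 << 5) ^ c;
}

static uint32_t roll_sum(const rolling_state *r) { return r->h1 + r->h2 + r->h3; }

In the usual spamsum design, a boundary is declared whenever roll_sum() is congruent to blocksize - 1 modulo the current block size (SSDEEP_BS(index) above), which is the condition fuzzy_engine_step tests before emitting a digest character.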


@@ -59,9 +59,12 @@ AARU_EXPORT int AARU_CALL spamsum_update(spamsum_ctx* ctx, const uint8_
AARU_EXPORT int AARU_CALL spamsum_final(spamsum_ctx *ctx, uint8_t *result);
AARU_EXPORT void AARU_CALL spamsum_free(spamsum_ctx *ctx);
AARU_LOCAL void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c);
AARU_LOCAL void roll_hash(spamsum_ctx* ctx, uint8_t c);
AARU_LOCAL void fuzzy_try_reduce_blockhash(spamsum_ctx* ctx);
AARU_LOCAL void fuzzy_try_fork_blockhash(spamsum_ctx* ctx);
FORCE_INLINE void fuzzy_engine_step(spamsum_ctx *ctx, uint8_t c);
FORCE_INLINE void roll_hash(spamsum_ctx *ctx, uint8_t c);
FORCE_INLINE void fuzzy_try_reduce_blockhash(spamsum_ctx *ctx);
FORCE_INLINE void fuzzy_try_fork_blockhash(spamsum_ctx *ctx);
#endif // AARU_CHECKSUMS_NATIVE_SPAMSUM_H


@@ -46,7 +46,8 @@ class adler32Fixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}
@@ -341,6 +342,7 @@ TEST_F(adler32Fixture, adler32_neon_2352bytes)
EXPECT_EQ(adler32, EXPECTED_ADLER32_2352BYTES);
}
#endif
#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
@@ -561,4 +563,5 @@ TEST_F(adler32Fixture, adler32_ssse3_2352bytes)
EXPECT_EQ(adler32, EXPECTED_ADLER32_2352BYTES);
}
#endif


@@ -46,7 +46,8 @@ class crc16Fixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}


@@ -46,7 +46,8 @@ class crc16_ccittFixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}


@@ -46,7 +46,8 @@ class crc32Fixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}
@@ -282,6 +283,7 @@ TEST_F(crc32Fixture, crc32_clmul_2352bytes)
EXPECT_EQ(crc, EXPECTED_CRC32_2352BYTES);
}
#endif
#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
@@ -363,6 +365,7 @@ TEST_F(crc32Fixture, crc32_arm_crc32_2352bytes)
EXPECT_EQ(crc, EXPECTED_CRC32_2352BYTES);
}
#endif
TEST_F(crc32Fixture, crc32_vmull)
@@ -442,4 +445,5 @@ TEST_F(crc32Fixture, crc32_vmull_2352bytes)
EXPECT_EQ(crc, EXPECTED_CRC32_2352BYTES);
}
#endif


@@ -46,7 +46,8 @@ class crc64Fixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}
@@ -282,6 +283,7 @@ TEST_F(crc64Fixture, crc64_clmul_2352bytes)
EXPECT_EQ(crc, EXPECTED_CRC64_2352BYTES);
}
#endif
#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
@@ -362,4 +364,5 @@ TEST_F(crc64Fixture, crc64_vmull_2352bytes)
EXPECT_EQ(crc, EXPECTED_CRC64_2352BYTES);
}
#endif


@@ -46,7 +46,8 @@ class fletcher16Fixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}


@@ -46,7 +46,8 @@ class fletcher32Fixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}
@@ -245,6 +246,7 @@ TEST_F(fletcher32Fixture, fletcher32_neon_2352bytes)
EXPECT_EQ(fletcher32, EXPECTED_FLETCHER32_2352BYTES);
}
#endif
#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
@@ -465,4 +467,5 @@ TEST_F(fletcher32Fixture, fletcher32_ssse3_2352bytes)
EXPECT_EQ(fletcher32, EXPECTED_FLETCHER32_2352BYTES);
}
#endif


@@ -46,7 +46,8 @@ class spamsumFixture : public ::testing::Test
memcpy((void *)(buffer_misaligned + 1), buffer, 1048576);
}
void TearDown() {
void TearDown()
{
free((void *)buffer);
free((void *)buffer_misaligned);
}