diff --git a/CMakeLists.txt b/CMakeLists.txt
index 55557ee..f43e11f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -18,8 +18,10 @@ if("${CMAKE_BUILD_TYPE}" MATCHES "Release")
         if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "i686" OR
            ${CMAKE_SYSTEM_PROCESSOR} MATCHES "AMD64")
             add_compile_options(-march=core2 -mfpmath=sse -msse3 -mtune=westmere)
+        elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
+            add_compile_options(-march=armv8-a)
         endif()
     endif()
 endif()
 
-add_library("Aaru.Checksums.Native" SHARED adler32.h adler32.c crc16.h crc16.c crc16_ccitt.h crc16_ccitt.c crc32.c crc32.h crc64.c crc64.h fletcher16.h fletcher16.c fletcher32.h fletcher32.c library.h spamsum.c spamsum.h crc32_clmul.c crc64_clmul.c simd.c simd.h adler32_ssse3.c adler32_avx2.c)
+add_library("Aaru.Checksums.Native" SHARED adler32.h adler32.c crc16.h crc16.c crc16_ccitt.h crc16_ccitt.c crc32.c crc32.h crc64.c crc64.h fletcher16.h fletcher16.c fletcher32.h fletcher32.c library.h spamsum.c spamsum.h crc32_clmul.c crc64_clmul.c simd.c simd.h adler32_ssse3.c adler32_avx2.c adler32_neon.c)
diff --git a/adler32.c b/adler32.c
index 1ad47b5..eed4ec8 100644
--- a/adler32.c
+++ b/adler32.c
@@ -48,7 +48,14 @@ AARU_EXPORT adler32_ctx* AARU_CALL adler32_init()
 
 AARU_EXPORT int AARU_CALL adler32_update(adler32_ctx* ctx, const uint8_t* data, uint32_t len)
 {
     if(!ctx || !data) return -1;
+#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
+    if(have_neon())
+    {
+        adler32_neon(&ctx->sum1, &ctx->sum2, data, len);
+        return 0;
+    }
+#endif
 #if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
     defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
     if(have_avx2())
diff --git a/adler32.h b/adler32.h
index ad8f84f..1097b07 100644
--- a/adler32.h
+++ b/adler32.h
@@ -42,4 +42,10 @@ void adler32_avx2(uint16_t* sum1, uint16_t* sum2, const unsigned char* buf, size_t len);
 
 #endif
 
+#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
+
+void adler32_neon(uint16_t* sum1, uint16_t* sum2, const unsigned char* buf, uint32_t len);
+
+#endif
+
 #endif // AARU_CHECKSUMS_NATIVE_ADLER32_H
diff --git a/adler32_avx2.c b/adler32_avx2.c
index 7075e52..735d535 100644
--- a/adler32_avx2.c
+++ b/adler32_avx2.c
@@ -2,6 +2,9 @@
 // Created by claunia on 28/9/21.
 //
 
+#if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
+    defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
+
 #include <immintrin.h>
 #include <stdint.h>
 
@@ -148,3 +151,5 @@ AVX2 void adler32_avx2(uint16_t* sum1, uint16_t* sum2, const unsigned char* buf, size_t len)
     *sum1 = s1 & 0xFFFF;
     *sum2 = s2 & 0xFFFF;
 }
+
+#endif
\ No newline at end of file
diff --git a/adler32_neon.c b/adler32_neon.c
new file mode 100644
index 0000000..648de6a
--- /dev/null
+++ b/adler32_neon.c
@@ -0,0 +1,139 @@
+//
+// Created by claunia on 28/9/21.
+//
+
+#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
+
+#include <arm_neon.h>
+
+#include "library.h"
+#include "adler32.h"
+#include "simd.h"
+
+void adler32_neon(uint16_t* sum1, uint16_t* sum2, const unsigned char* buf, uint32_t len)
+{
+    /*
+     * Split Adler-32 into component sums.
+     */
+    uint32_t s1 = *sum1;
+    uint32_t s2 = *sum2;
+    /*
+     * Serially compute s1 & s2, until the data is 16-byte aligned.
+     */
+    if((uintptr_t)buf & 15)
+    {
+        while((uintptr_t)buf & 15)
+        {
+            s2 += (s1 += *buf++);
+            --len;
+        }
+        if(s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
+        s2 %= ADLER_MODULE;
+    }
+    /*
+     * Process the data in blocks.
+     */
+    const unsigned BLOCK_SIZE = 1 << 5;
+    uint32_t       blocks     = len / BLOCK_SIZE;
+    len -= blocks * BLOCK_SIZE;
+    while(blocks)
+    {
+        unsigned n = NMAX / BLOCK_SIZE; /* The NMAX constraint. */
+        if(n > blocks) n = (unsigned)blocks;
+        blocks -= n;
+        /*
+         * Process n blocks of data. At most NMAX data bytes can be
+         * processed before s2 must be reduced modulo ADLER_MODULE.
+         */
+        uint32x4_t v_s2 = (uint32x4_t){0, 0, 0, s1 * n};
+        uint32x4_t v_s1 = (uint32x4_t){0, 0, 0, 0};
+        uint16x8_t v_column_sum_1 = vdupq_n_u16(0);
+        uint16x8_t v_column_sum_2 = vdupq_n_u16(0);
+        uint16x8_t v_column_sum_3 = vdupq_n_u16(0);
+        uint16x8_t v_column_sum_4 = vdupq_n_u16(0);
+        do {
+            /*
+             * Load 32 input bytes.
+             */
+            const uint8x16_t bytes1 = vld1q_u8((uint8_t*)(buf));
+            const uint8x16_t bytes2 = vld1q_u8((uint8_t*)(buf + 16));
+            /*
+             * Add previous block byte sum to v_s2.
+             */
+            v_s2 = vaddq_u32(v_s2, v_s1);
+            /*
+             * Horizontally add the bytes for s1.
+             */
+            v_s1 = vpadalq_u16(v_s1, vpadalq_u8(vpaddlq_u8(bytes1), bytes2));
+            /*
+             * Vertically add the bytes for s2.
+             */
+            v_column_sum_1 = vaddw_u8(v_column_sum_1, vget_low_u8(bytes1));
+            v_column_sum_2 = vaddw_u8(v_column_sum_2, vget_high_u8(bytes1));
+            v_column_sum_3 = vaddw_u8(v_column_sum_3, vget_low_u8(bytes2));
+            v_column_sum_4 = vaddw_u8(v_column_sum_4, vget_high_u8(bytes2));
+            buf += BLOCK_SIZE;
+        } while(--n);
+        v_s2 = vshlq_n_u32(v_s2, 5);
+        /*
+         * Multiply-add bytes by [ 32, 31, 30, ... ] for s2.
+         */
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), (uint16x4_t){32, 31, 30, 29});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), (uint16x4_t){28, 27, 26, 25});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), (uint16x4_t){24, 23, 22, 21});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2), (uint16x4_t){20, 19, 18, 17});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_3), (uint16x4_t){16, 15, 14, 13});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3), (uint16x4_t){12, 11, 10, 9});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_4), (uint16x4_t){8, 7, 6, 5});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4), (uint16x4_t){4, 3, 2, 1});
+        /*
+         * Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
+         */
+        uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1));
+        uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2));
+        uint32x2_t s1s2 = vpadd_u32(sum1, sum2);
+        s1 += vget_lane_u32(s1s2, 0);
+        s2 += vget_lane_u32(s1s2, 1);
+        /*
+         * Reduce.
+         */
+        s1 %= ADLER_MODULE;
+        s2 %= ADLER_MODULE;
+    }
+    /*
+     * Handle leftover data.
+     */
+    if(len)
+    {
+        if(len >= 16)
+        {
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            s2 += (s1 += *buf++);
+            len -= 16;
+        }
+        while(len--) { s2 += (s1 += *buf++); }
+        if(s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
+        s2 %= ADLER_MODULE;
+    }
+    /*
+     * Return the recombined sums.
+     */
+    *sum1 = s1 & 0xFFFF;
+    *sum2 = s2 & 0xFFFF;
+}
+
+#endif
\ No newline at end of file
diff --git a/simd.c b/simd.c
index 91fd687..7fa1efd 100644
--- a/simd.c
+++ b/simd.c
@@ -1,3 +1,5 @@
+#include "simd.h"
+
 #if defined(__x86_64__) || defined(__amd64) || defined(_M_AMD64) || defined(_M_X64) || defined(__I386__) || \
     defined(__i386__) || defined(__THW_INTEL) || defined(_M_IX86)
 
@@ -86,4 +88,19 @@ int have_avx2(void)
     return ebx & 0x20;
 }
 
+#endif
+
+#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
+#include <sys/auxv.h>
+#endif
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+int have_neon(void)
+{
+    return 1; // ARMv8-A made it mandatory
+}
+#endif
+
+#if defined(__arm__) || defined(_M_ARM)
+int have_neon(void) { return getauxval(AT_HWCAP) & HWCAP_NEON; }
 #endif
\ No newline at end of file
diff --git a/simd.h b/simd.h
index a302eb7..cb0a88a 100644
--- a/simd.h
+++ b/simd.h
@@ -7,3 +7,15 @@ int have_clmul(void);
 int have_ssse3(void);
 int have_avx2(void);
 #endif
+
+#if defined(__arm__) || defined(_M_ARM)
+#define HWCAP_NEON (1 << 12)
+#endif
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+#define HWCAP_NEON (1 << 1)
+#endif
+
+#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
+int have_neon(void);
+#endif
\ No newline at end of file
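A quick way to sanity-check the new code path is to compare adler32_neon() against a plain byte-at-a-time Adler-32 over the same buffer. The harness below is a sketch and is not part of this patch: the file name, the adler32_scalar() reference function, and the seed s1 = 1, s2 = 0 (the standard Adler-32 starting value; adler32_init() is not shown in this diff, so the seed is an assumption) are illustrative only. It relies solely on the adler32_neon() signature declared in adler32.h above, and must be built on an ARM target together with adler32_neon.c and the repository headers.

/*
 * check_adler32_neon.c - standalone cross-check for the NEON Adler-32 path.
 * Assumes the standard Adler-32 seed (s1 = 1, s2 = 0); adler32_init() is not
 * part of this diff, so that seed is an assumption. Build only on ARM, roughly:
 *   cc -O2 -I. check_adler32_neon.c adler32_neon.c -o check_adler32_neon
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "adler32.h" /* declares adler32_neon() on ARM builds */

#define MOD_ADLER 65521u /* the Adler-32 modulus */

/* Byte-at-a-time Adler-32 used as the reference result. */
static uint32_t adler32_scalar(const unsigned char* buf, uint32_t len)
{
    uint32_t s1 = 1, s2 = 0;
    for(uint32_t i = 0; i < len; i++)
    {
        s1 = (s1 + buf[i]) % MOD_ADLER;
        s2 = (s2 + s1) % MOD_ADLER;
    }
    return (s2 << 16) | s1;
}

int main(void)
{
    const uint32_t len = 1u << 20; /* 1 MiB exercises the block loop and the tail */
    unsigned char* buf = malloc(len);
    if(!buf) return 1;

    /* Deterministic pseudo-random fill so the two results are comparable. */
    for(uint32_t i = 0; i < len; i++) buf[i] = (unsigned char)(i * 131u + 7u);

    uint16_t s1 = 1, s2 = 0; /* assumed seed: standard Adler-32 start value */
    adler32_neon(&s1, &s2, buf, len);
    uint32_t got  = ((uint32_t)s2 << 16) | s1;
    uint32_t want = adler32_scalar(buf, len);

    printf("neon=%08x scalar=%08x %s\n", (unsigned)got, (unsigned)want,
           got == want ? "OK" : "MISMATCH");
    free(buf);
    return got == want ? 0 : 1;
}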