Mirror of https://github.com/aaru-dps/Aaru.Checksums.Native.git, synced 2025-12-16 11:14:29 +00:00
Enable ADLER32 using NEON in 32-bit Windows on ARM.
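For context: the routine changed below vectorizes the standard Adler-32 recurrence, where s1 is the running sum of the input bytes and s2 is the running sum of s1 after every byte, both reduced modulo 65521. A minimal scalar sketch of that recurrence, assuming the usual zlib-style constants (adler32_scalar is an illustrative name, not the library's API):

    #include <stddef.h>
    #include <stdint.h>

    #define ADLER_MODULE 65521 /* Largest prime below 2^16. */
    #define NMAX         5552  /* Max bytes accumulable before the 32-bit sums could overflow. */

    static uint32_t adler32_scalar(uint32_t adler, const uint8_t *data, size_t len)
    {
        uint32_t s1 = adler & 0xffff;         /* Low half: byte sum. */
        uint32_t s2 = (adler >> 16) & 0xffff; /* High half: sum of s1 values. */
        while (len)
        {
            size_t n = len < NMAX ? len : NMAX; /* Defer the modulo as long as it is safe. */
            len -= n;
            while (n--) s2 += (s1 += *data++);
            s1 %= ADLER_MODULE;
            s2 %= ADLER_MODULE;
        }
        return (s2 << 16) | s1;
    }

The NEON routine computes the same two sums 32 bytes at a time; the NMAX constraint visible in the diff plays the same role as here, bounding how much may be accumulated between modulo reductions.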
@@ -38,7 +38,7 @@
 #include "adler32.h"
 #include "simd.h"
 
-TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t* data, uint32_t len)
+TARGET_WITH_SIMD void adler32_neon(uint16_t *sum1, uint16_t *sum2, const uint8_t *data, uint32_t len)
 {
     /*
      * Split Adler-32 into component sums.
@@ -48,14 +48,14 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
     /*
      * Serially compute s1 & s2, until the data is 16-byte aligned.
      */
-    if((uintptr_t)data & 15)
+    if ((uintptr_t) data & 15)
     {
-        while((uintptr_t)data & 15)
+        while ((uintptr_t) data & 15)
         {
             s2 += (s1 += *data++);
             --len;
         }
-        if(s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
+        if (s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
         s2 %= ADLER_MODULE;
     }
     /*
@@ -64,10 +64,10 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
     const unsigned BLOCK_SIZE = 1 << 5;
     uint32_t blocks = len / BLOCK_SIZE;
     len -= blocks * BLOCK_SIZE;
-    while(blocks)
+    while (blocks)
     {
         unsigned n = NMAX / BLOCK_SIZE; /* The NMAX constraint. */
-        if(n > blocks) n = (unsigned)blocks;
+        if (n > blocks) n = (unsigned) blocks;
         blocks -= n;
         /*
          * Process n blocks of data. At most NMAX data bytes can be
@@ -77,19 +77,20 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
         uint32x4_t v_s2 = {.n128_u32 = {0, 0, 0, s1 * n}};
         uint32x4_t v_s1 = {.n128_u32 = {0, 0, 0, 0}};
 #else
-        uint32x4_t v_s2 = (uint32x4_t){0, 0, 0, s1 * n};
-        uint32x4_t v_s1 = (uint32x4_t){0, 0, 0, 0};
+        uint32x4_t v_s2 = (uint32x4_t) {0, 0, 0, s1 * n};
+        uint32x4_t v_s1 = (uint32x4_t) {0, 0, 0, 0};
 #endif
         uint16x8_t v_column_sum_1 = vdupq_n_u16(0);
         uint16x8_t v_column_sum_2 = vdupq_n_u16(0);
         uint16x8_t v_column_sum_3 = vdupq_n_u16(0);
         uint16x8_t v_column_sum_4 = vdupq_n_u16(0);
-        do {
+        do
+        {
             /*
              * Load 32 input bytes.
              */
-            const uint8x16_t bytes1 = vld1q_u8((uint8_t*)(data));
-            const uint8x16_t bytes2 = vld1q_u8((uint8_t*)(data + 16));
+            const uint8x16_t bytes1 = vld1q_u8((uint8_t *) (data));
+            const uint8x16_t bytes2 = vld1q_u8((uint8_t *) (data + 16));
             /*
              * Add previous block byte sum to v_s2.
              */
@@ -106,29 +107,40 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
             v_column_sum_3 = vaddw_u8(v_column_sum_3, vget_low_u8(bytes2));
             v_column_sum_4 = vaddw_u8(v_column_sum_4, vget_high_u8(bytes2));
             data += BLOCK_SIZE;
-        } while(--n);
+        } while (--n);
         v_s2 = vshlq_n_u32(v_s2, 5);
         /*
          * Multiply-add bytes by [ 32, 31, 30, ... ] for s2.
         */
 #ifdef _MSC_VER
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), neon_ld1m_16((uint16_t[]){32, 31, 30, 29}));
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), neon_ld1m_16((uint16_t[]){28, 27, 26, 25}));
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), neon_ld1m_16((uint16_t[]){24, 23, 22, 21}));
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2), neon_ld1m_16((uint16_t[]){20, 19, 18, 17}));
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_3), neon_ld1m_16((uint16_t[]){16, 15, 14, 13}));
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3), neon_ld1m_16((uint16_t[]){12, 11, 10, 9}));
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_4), neon_ld1m_16((uint16_t[]){8, 7, 6, 5}));
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4), neon_ld1m_16((uint16_t[]){4, 3, 2, 1}));
+#ifdef _M_ARM64
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), neon_ld1m_16((uint16_t[]) {32, 31, 30, 29}));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), neon_ld1m_16((uint16_t[]) {28, 27, 26, 25}));
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), neon_ld1m_16((uint16_t[]) {24, 23, 22, 21}));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2), neon_ld1m_16((uint16_t[]) {20, 19, 18, 17}));
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_3), neon_ld1m_16((uint16_t[]) {16, 15, 14, 13}));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3), neon_ld1m_16((uint16_t[]) {12, 11, 10, 9}));
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_4), neon_ld1m_16((uint16_t[]) {8, 7, 6, 5}));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4), neon_ld1m_16((uint16_t[]) {4, 3, 2, 1}));
 #else
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), (uint16x4_t){32, 31, 30, 29});
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), (uint16x4_t){28, 27, 26, 25});
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), (uint16x4_t){24, 23, 22, 21});
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2), (uint16x4_t){20, 19, 18, 17});
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_3), (uint16x4_t){16, 15, 14, 13});
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3), (uint16x4_t){12, 11, 10, 9});
-        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_4), (uint16x4_t){8, 7, 6, 5});
-        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4), (uint16x4_t){4, 3, 2, 1});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), vld1_u16(((uint16_t[]) {32, 31, 30, 29})));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), vld1_u16(((uint16_t[]) {28, 27, 26, 25})));
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), vld1_u16(((uint16_t[]) {24, 23, 22, 21})));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2), vld1_u16(((uint16_t[]) {20, 19, 18, 17})));
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_3), vld1_u16(((uint16_t[]) {16, 15, 14, 13})));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3), vld1_u16(((uint16_t[]) {12, 11, 10, 9})));
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_4), vld1_u16(((uint16_t[]) {8, 7, 6, 5})));
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4), vld1_u16(((uint16_t[]) {4, 3, 2, 1})));
+#endif
+#else
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), (uint16x4_t) {32, 31, 30, 29});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), (uint16x4_t) {28, 27, 26, 25});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), (uint16x4_t) {24, 23, 22, 21});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2), (uint16x4_t) {20, 19, 18, 17});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_3), (uint16x4_t) {16, 15, 14, 13});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3), (uint16x4_t) {12, 11, 10, 9});
+        v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_4), (uint16x4_t) {8, 7, 6, 5});
+        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4), (uint16x4_t) {4, 3, 2, 1});
 #endif
         /*
          * Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
@@ -147,9 +159,9 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
     /*
      * Handle leftover data.
      */
-    if(len)
+    if (len)
     {
-        if(len >= 16)
+        if (len >= 16)
         {
             s2 += (s1 += *data++);
             s2 += (s1 += *data++);
@@ -169,8 +181,9 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
             s2 += (s1 += *data++);
             len -= 16;
         }
-        while(len--) { s2 += (s1 += *data++); }
-        if(s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
+        while (len--)
+        { s2 += (s1 += *data++); }
+        if (s1 >= ADLER_MODULE) s1 -= ADLER_MODULE;
         s2 %= ADLER_MODULE;
     }
     /*
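Beyond the whitespace and brace reformatting, the substantive change is in the #ifdef _MSC_VER branch of the multiply-add step. Previously every MSVC build went through the project's neon_ld1m_16 helper; this commit keeps that path only for ARM64 (_M_ARM64) and gives 32-bit ARM MSVC its own branch that builds each coefficient vector with the standard vld1_u16 intrinsic over a C99 array compound literal, presumably because neon_ld1m_16 is only available on ARM64 and MSVC does not accept GCC/Clang-style vector compound literals such as (uint16x4_t){32, 31, 30, 29}. A minimal standalone sketch of the pattern (make_u16x4 is a hypothetical helper name, not the library's code):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Materialize a uint16x4_t constant in a form 32-bit ARM MSVC accepts:
     * load it from an ordinary uint16_t array rather than writing a vector
     * compound literal. vld1_u16 exists on both ARM and ARM64. */
    static uint16x4_t make_u16x4(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
    {
        const uint16_t tmp[4] = {a, b, c, d};
        return vld1_u16(tmp);
    }

Used as in the diff, e.g. v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), make_u16x4(32, 31, 30, 29)); a compiler is free to fold the array load into a vector constant, so the workaround mainly costs source readability.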
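For reference, a sketch of the arithmetic the block loop implements (the standard derivation for vectorized Adler-32, not text from the repository). Processing a run of N = 32n bytes d_0 ... d_(N-1) updates the two sums as

    s1' = s1 + (d_0 + d_1 + ... + d_(N-1))
    s2' = s2 + N*s1 + (N*d_0 + (N-1)*d_1 + ... + 1*d_(N-1))

The code assembles the weighted sum in three pieces: seeding one lane of v_s2 with s1 * n and shifting the whole vector left by 5 at the end contributes 32n*s1 = N*s1; adding the running v_s1 into v_s2 once per 32-byte block, ahead of that same shift, gives each byte the multiple-of-32 part of its (N - j) weight; and the eight vmlal_u16 multiply-accumulates with the constants [32, 31, ..., 1] supply the within-block remainder of each weight.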