diff --git a/adler32.c b/adler32.c
index f817908..225cb09 100644
--- a/adler32.c
+++ b/adler32.c
@@ -46,7 +46,7 @@ AARU_EXPORT adler32_ctx* AARU_CALL adler32_init()
 AARU_EXPORT int AARU_CALL adler32_update(adler32_ctx* ctx, const uint8_t* data, uint32_t len)
 {
     if(!ctx || !data) return -1;
-#if defined(__aarch64__) || defined(_M_ARM64) || ((defined(__arm__) || defined(_M_ARM)) && !defined(_WIN32))
+#if defined(__aarch64__) || defined(_M_ARM64) || ((defined(__arm__) || defined(_M_ARM)) && !defined(__MINGW32__))
     if(have_neon())
     {
         adler32_neon(&ctx->sum1, &ctx->sum2, data, len);
diff --git a/adler32_neon.c b/adler32_neon.c
index 58722a2..9346891 100644
--- a/adler32_neon.c
+++ b/adler32_neon.c
@@ -30,7 +30,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#if defined(__aarch64__) || defined(_M_ARM64) || ((defined(__arm__) || defined(_M_ARM)) && !defined(_WIN32))
+#if defined(__aarch64__) || defined(_M_ARM64) || ((defined(__arm__) || defined(_M_ARM)) && !defined(_MSC_VER))
 
 #include <arm_neon.h>
 
@@ -73,7 +73,7 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
      * Process n blocks of data. At most NMAX data bytes can be
      * processed before s2 must be reduced modulo ADLER_MODULE.
      */
-#ifdef _WIN32
+#ifdef _MSC_VER
     uint32x4_t v_s2 = {.n128_u32 = {0, 0, 0, s1 * n}};
     uint32x4_t v_s1 = {.n128_u32 = {0, 0, 0, 0}};
 #else
@@ -111,7 +111,7 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
         /*
          * Multiply-add bytes by [ 32, 31, 30, ... ] for s2.
          */
-#ifdef _WIN32
+#ifdef _MSC_VER
         v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_1), neon_ld1m_16((uint16_t[]){32, 31, 30, 29}));
         v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1), neon_ld1m_16((uint16_t[]){28, 27, 26, 25}));
         v_s2 = vmlal_u16(v_s2, vget_low_u16(v_column_sum_2), neon_ld1m_16((uint16_t[]){24, 23, 22, 21}));
@@ -180,4 +180,4 @@ TARGET_WITH_SIMD void adler32_neon(uint16_t* sum1, uint16_t* sum2, const uint8_t
     *sum2 = s2 & 0xFFFF;
 }
 
-#endif
\ No newline at end of file
+#endif
diff --git a/arm_vmull.c b/arm_vmull.c
index cf1f252..e17d065 100644
--- a/arm_vmull.c
+++ b/arm_vmull.c
@@ -32,7 +32,7 @@
 #include "arm_vmull.h"
 #include "simd.h"
 
-#if !defined(_WIN32) && (!defined(__ANDROID__) || !defined(__arm__))
+#if !defined(__MINGW32__) && (!defined(__ANDROID__) || !defined(__arm__))
 TARGET_WITH_CRYPTO static uint64x2_t sse2neon_vmull_p64_crypto(uint64x1_t _a, uint64x1_t _b)
 {
     poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
@@ -43,7 +43,7 @@ TARGET_WITH_CRYPTO static uint64x2_t sse2neon_vmull_p64_crypto(uint64x1_t _a, ui
 
 TARGET_WITH_SIMD uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
 {
-#if !defined(_WIN32) && (!defined(__ANDROID__) || !defined(__arm__))
+#if !defined(__MINGW32__) && (!defined(__ANDROID__) || !defined(__arm__))
     // Wraps vmull_p64
     if(have_arm_crypto()) return sse2neon_vmull_p64_crypto(_a, _b);
 #endif