libaaruformat 1.0
Aaru Data Preservation Suite - Format Library
arm_vmull.c
/*
 * This file is part of the Aaru Data Preservation Suite.
 * Copyright (c) 2019-2025 Natalia Portillo.
 * Copyright sse2neon.h contributors
 *
 * sse2neon is freely redistributable under the MIT License.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)

#include <arm_neon.h>

#include <aaruformat.h>

#include "arm_vmull.h"

#if !defined(__MINGW32__) && (!defined(__ANDROID__) || !defined(__arm__))
TARGET_WITH_CRYPTO static uint64x2_t sse2neon_vmull_p64_crypto(uint64x1_t _a, uint64x1_t _b)
{
    poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
    poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
    return vreinterpretq_u64_p128(vmull_p64(a, b));
}
#endif

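/*
 * 64x64 -> 128-bit carry-less (polynomial) multiplication over GF(2), roughly
 * the NEON counterpart of one PCLMULQDQ lane on x86. When the ARM crypto
 * extension is available this wraps the single PMULL (vmull_p64) instruction;
 * otherwise it falls back to the ARMv7 NEON polyfill below.
 *
 * Illustrative example (hypothetical values): multiplying 3 (x + 1) by itself
 * yields 5 (x^2 + 1), since the middle terms cancel without carries:
 *
 *     uint64x2_t prod = sse2neon_vmull_p64(vcreate_u64(3), vcreate_u64(3));
 *     // vgetq_lane_u64(prod, 0) == 5, vgetq_lane_u64(prod, 1) == 0
 */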
TARGET_WITH_SIMD uint64x2_t sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
#if !defined(__MINGW32__) && (!defined(__ANDROID__) || !defined(__arm__))
    // Wraps vmull_p64
    if(have_arm_crypto()) return sse2neon_vmull_p64_crypto(_a, _b);
#endif

    // ARMv7 polyfill
    // ARMv7 (and AArch64 cores without the crypto extension) lacks vmull_p64,
    // but it does have vmull_p8.
    //
    // vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
    // 64-bit->128-bit polynomial multiply.
    //
    // It needs some work and is somewhat slow, but it is still faster than all
    // known scalar methods.
    //
    // Algorithm adapted to C from
    // https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
    // from "Fast Software Polynomial Multiplication on ARM Processors Using the
    // NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
    // (https://hal.inria.fr/hal-01506572)
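    //
    // Roughly: each 64-bit operand is treated as eight 8-bit polynomial limbs;
    // the eight vmull_p8 calls below compute the partial products a_i * b_(i+k)
    // (indices mod 8) for rotations k = 0..4 of b and k = 1..3 of a, and the
    // mask/shift/XOR steps that follow fold those partial products into the
    // 128-bit result.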

    poly8x8_t a = vreinterpret_p8_u64(_a);
    poly8x8_t b = vreinterpret_p8_u64(_b);

    // Masks
    uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff), vcreate_u8(0x00000000ffffffff));
    uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff), vcreate_u8(0x0000000000000000));

    // Do the multiplies, rotating with vext to get all combinations
    uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b));                // D = A0 * B0
    uint8x16_t e = vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
    uint8x16_t f = vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
    uint8x16_t g = vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
    uint8x16_t h = vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
    uint8x16_t i = vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
    uint8x16_t j = vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
    uint8x16_t k = vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // K = A0 * B4

    // Add cross products
    uint8x16_t l = veorq_u8(e, f); // L = E + F
    uint8x16_t m = veorq_u8(g, h); // M = G + H
    uint8x16_t n = veorq_u8(i, j); // N = I + J

    // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
    // instructions.
#if defined(__aarch64__)
    uint8x16_t lm_p0 = vreinterpretq_u8_u64(vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
    uint8x16_t lm_p1 = vreinterpretq_u8_u64(vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
    uint8x16_t nk_p0 = vreinterpretq_u8_u64(vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
    uint8x16_t nk_p1 = vreinterpretq_u8_u64(vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
#else
    uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
    uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
    uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
    uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
#endif
    // t0 = (L) (P0 + P1) << 8
    // t1 = (M) (P2 + P3) << 16
    uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
    uint8x16_t t0t1_h   = vandq_u8(lm_p1, k48_32);
    uint8x16_t t0t1_l   = veorq_u8(t0t1_tmp, t0t1_h);

    // t2 = (N) (P4 + P5) << 24
    // t3 = (K) (P6 + P7) << 32
    uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
    uint8x16_t t2t3_h   = vandq_u8(nk_p1, k16_00);
    uint8x16_t t2t3_l   = veorq_u8(t2t3_tmp, t2t3_h);

    // De-interleave
#if defined(__aarch64__)
    uint8x16_t t0 = vreinterpretq_u8_u64(vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
    uint8x16_t t1 = vreinterpretq_u8_u64(vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
    uint8x16_t t2 = vreinterpretq_u8_u64(vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
    uint8x16_t t3 = vreinterpretq_u8_u64(vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
#else
    uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
    uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
    uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
    uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
#endif
    // Shift the cross products
    uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
    uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
    uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
    uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32

    // Accumulate the products
    uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
    uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
    uint8x16_t mix    = veorq_u8(d, cross1);
    uint8x16_t r      = veorq_u8(mix, cross2);
    return vreinterpretq_u64_u8(r);
}

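/*
 * NEON analogue of the SSSE3 _mm_shuffle_epi8 (PSHUFB) byte shuffle: each
 * result byte is tbl[idx & 0x0F], or zero when the index byte has its high
 * bit set. The 0x8F mask keeps the sign bit and the low 4 index bits, so a
 * "zero this byte" index becomes >= 16 and falls outside the vqtbl1q_u8 /
 * vtbl2_u8 table range, which yields zero.
 */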
TARGET_WITH_SIMD uint64x2_t mm_shuffle_epi8(uint64x2_t a, uint64x2_t b)
{
    uint8x16_t tbl        = vreinterpretq_u8_u64(a);         // input a
    uint8x16_t idx        = vreinterpretq_u8_u64(b);         // input b
    uint8x16_t idx_masked = vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
#if defined(__aarch64__)
    return vreinterpretq_u64_u8(vqtbl1q_u8(tbl, idx_masked));
#else
    // ARMv7: split the 128-bit table into two 64-bit halves for vtbl2_u8
    uint8x8x2_t a_split = {vget_low_u8(tbl), vget_high_u8(tbl)};
    return vreinterpretq_u64_u8(
        vcombine_u8(vtbl2_u8(a_split, vget_low_u8(idx_masked)), vtbl2_u8(a_split, vget_high_u8(idx_masked))));
#endif
}

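/*
 * NEON analogue of _mm_srli_si128: shift the 128-bit value right by imm
 * bytes, filling with zeros. The value is laid out in memory next to 16 zero
 * bytes and 16 bytes are reloaded starting at offset imm (imm is assumed to
 * be in the range 0..16).
 */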
TARGET_WITH_SIMD uint64x2_t mm_srli_si128(uint64x2_t a, int imm)
{
    uint8x16_t tmp[2] = {vreinterpretq_u8_u64(a), vdupq_n_u8(0)};
    return vreinterpretq_u64_u8(vld1q_u8(((uint8_t const *)tmp) + imm));
}

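/*
 * NEON analogue of _mm_slli_si128: shift the 128-bit value left by imm bytes,
 * filling with zeros. This mirrors mm_srli_si128 above, placing the zero
 * block before the value and loading 16 bytes starting imm bytes before it
 * (imm is assumed to be in the range 0..16).
 */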
TARGET_WITH_SIMD uint64x2_t mm_slli_si128(uint64x2_t a, int imm)
{
    uint8x16_t tmp[2] = {vdupq_n_u8(0), vreinterpretq_u8_u64(a)};
    return vreinterpretq_u64_u8(vld1q_u8(((uint8_t const *)tmp) + (16 - imm)));
}

#endif