libaaruformat 1.0
Aaru Data Preservation Suite - Format Library
crc64_vmull.c
/*
 * This file is part of the Aaru Data Preservation Suite.
 * Copyright (c) 2019-2025 Natalia Portillo.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of the
 * License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#if defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

#include <aaruformat.h>

#include "arm_vmull.h"
#include "log.h"

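// Byte-shuffle mask table: the first 16 entries are identity indices (0x00-0x0f); the second 16 have the
// high bit set, which an SSE-style byte shuffle (mm_shuffle_epi8, provided by arm_vmull.h) treats as
// "zero this lane". Loading 16 bytes at an offset into the table therefore yields a mask that moves a
// 128-bit block by a whole number of bytes.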
static const uint8_t shuffleMasks[] = {
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80,
};

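// Splits the 128-bit value `in` at a byte boundary using the mask table above; the two outputs carry the
// complementary byte lanes and are used to position partial blocks and the running CRC inside a 16-byte window.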
TARGET_WITH_SIMD FORCE_INLINE void shiftRight128(uint64x2_t in, size_t n, uint64x2_t *outLeft, uint64x2_t *outRight)
{
    const uint64x2_t maskA =
        vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)(const uint64x2_t *)(shuffleMasks + (16 - n))));
    uint64x2_t b = vreinterpretq_u64_u8(vceqq_u8(vreinterpretq_u8_u64(vreinterpretq_u64_u32(vdupq_n_u32(0))),
                                                 vreinterpretq_u8_u64(vreinterpretq_u64_u32(vdupq_n_u32(0)))));
    const uint64x2_t maskB = vreinterpretq_u64_u32(veorq_u32(vreinterpretq_u32_u64(maskA), vreinterpretq_u32_u64(b)));

    *outLeft  = mm_shuffle_epi8(in, maskB);
    *outRight = mm_shuffle_epi8(in, maskA);
}

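// One folding step: carryless-multiplies the low 64 bits of `in` by the low lane of foldConstants and the
// high 64 bits by the high lane, then XORs the two 128-bit products into a single value.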
TARGET_WITH_SIMD FORCE_INLINE uint64x2_t fold(uint64x2_t in, uint64x2_t foldConstants)
{
    return veorq_u64(sse2neon_vmull_p64(vget_low_u64(in), vget_low_u64(foldConstants)),
                     sse2neon_vmull_p64(vget_high_u64(in), vget_high_u64(foldConstants)));
}

AARU_EXPORT TARGET_WITH_SIMD uint64_t AARU_CALL aaruf_crc64_vmull(uint64_t previous_crc, const uint8_t *data, long len)
{
    TRACE("Entering aaruf_crc64_vmull(%llu, %p, %ld)", previous_crc, data, len);

    const uint64_t k1 = 0xe05dd497ca393ae4; // bitReflect(expMod65(128 + 64, poly, 1)) << 1;
    const uint64_t k2 = 0xdabe95afc7875f40; // bitReflect(expMod65(128, poly, 1)) << 1;
    const uint64_t mu = 0x9c3e466c172963d5; // (bitReflect(div129by65(poly)) << 1) | 1;
    const uint64_t p  = 0x92d8af2baf0e1e85; // (bitReflect(poly) << 1) | 1;

    const uint64x2_t foldConstants1 = vcombine_u64(vcreate_u64(k1), vcreate_u64(k2));
    const uint64x2_t foldConstants2 = vcombine_u64(vcreate_u64(mu), vcreate_u64(p));

    const uint8_t *end = data + len;

    // Align pointers
    const uint64x2_t *alignedData = (const uint64x2_t *)((uintptr_t)data & ~(uintptr_t)15);
    const uint64x2_t *alignedEnd  = (const uint64x2_t *)(((uintptr_t)end + 15) & ~(uintptr_t)15);

    const size_t leadInSize  = data - (const uint8_t *)alignedData;
    const size_t leadOutSize = (const uint8_t *)alignedEnd - end;

    const size_t alignedLength = alignedEnd - alignedData;

    const uint64x2_t leadInMask =
        vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)(const uint64x2_t *)(shuffleMasks + (16 - leadInSize))));
    uint64x2_t a = vreinterpretq_u64_u32(vdupq_n_u32(0));
    uint64x2_t b = vreinterpretq_u64_u32(
        vld1q_u32((const uint32_t *)alignedData)); // Use a signed shift right to create a mask with the sign bit
    const uint64x2_t data0 =
        vreinterpretq_u64_u8(vbslq_u8(vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_u64(leadInMask), 7)),
                                      vreinterpretq_u8_u64(b), vreinterpretq_u8_u64(a)));

    const uint64x2_t initialCrc = vsetq_lane_u64(~previous_crc, vdupq_n_u64(0), 0);

    uint64x2_t R;
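    // Three shapes follow, depending on how many aligned 16-byte blocks the buffer touches:
    // exactly one, exactly two, or three or more (the folding loop).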
    if(alignedLength == 1)
    {
        // Single data block, initial CRC possibly bleeds into zero padding
        uint64x2_t crc0, crc1;
        shiftRight128(initialCrc, 16 - len, &crc0, &crc1);

        uint64x2_t A, B;
        shiftRight128(data0, leadOutSize, &A, &B);

        const uint64x2_t P = veorq_u64(A, crc0);
        R = veorq_u64(sse2neon_vmull_p64(vget_low_u64(P), vget_high_u64(foldConstants1)),
                      veorq_u64(mm_srli_si128(P, 8), mm_slli_si128(crc1, 8)));
    }
    else if(alignedLength == 2)
    {
        const uint64x2_t data1 = vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)(alignedData + 1)));

        if(len < 8)
        {
            // Initial CRC bleeds into the zero padding
            uint64x2_t crc0, crc1;
            shiftRight128(initialCrc, 16 - len, &crc0, &crc1);

            uint64x2_t A, B, C, D;
            shiftRight128(data0, leadOutSize, &A, &B);
            shiftRight128(data1, leadOutSize, &C, &D);

            const uint64x2_t P = veorq_u64(veorq_u64(B, C), crc0);
            R = veorq_u64(sse2neon_vmull_p64(vget_low_u64(P), vget_high_u64(foldConstants1)),
                          veorq_u64(mm_srli_si128(P, 8), mm_slli_si128(crc1, 8)));
        }
        else
        {
            // We can fit the initial CRC into the data without bleeding into the zero padding
            uint64x2_t crc0, crc1;
            shiftRight128(initialCrc, leadInSize, &crc0, &crc1);

            uint64x2_t A, B, C, D;
            shiftRight128(veorq_u64(data0, crc0), leadOutSize, &A, &B);
            shiftRight128(veorq_u64(data1, crc1), leadOutSize, &C, &D);

            const uint64x2_t P = veorq_u64(fold(A, foldConstants1), veorq_u64(B, C));
            R = veorq_u64(sse2neon_vmull_p64(vget_low_u64(P), vget_high_u64(foldConstants1)), mm_srli_si128(P, 8));
        }
    }
    else
    {
        alignedData++;
        len -= 16 - leadInSize;

        // Initial CRC can simply be added to data
        uint64x2_t crc0, crc1;
        shiftRight128(initialCrc, leadInSize, &crc0, &crc1);

        uint64x2_t accumulator = veorq_u64(fold(veorq_u64(crc0, data0), foldConstants1), crc1);

        while(len >= 32)
        {
            accumulator = fold(veorq_u64(vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)alignedData)), accumulator),
                               foldConstants1);

            len -= 16;
            alignedData++;
        }

        uint64x2_t P;
        if(len == 16)
            P = veorq_u64(accumulator, vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)alignedData)));
        else
        {
            const uint64x2_t end0 =
                veorq_u64(accumulator, vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)alignedData)));
            const uint64x2_t end1 = vreinterpretq_u64_u32(vld1q_u32((const uint32_t *)(alignedData + 1)));

            uint64x2_t A, B, C, D;
            shiftRight128(end0, leadOutSize, &A, &B);
            shiftRight128(end1, leadOutSize, &C, &D);

            P = veorq_u64(fold(A, foldConstants1),
                          vreinterpretq_u64_u32(vorrq_u32(vreinterpretq_u32_u64(B), vreinterpretq_u32_u64(C))));
        }

        R = veorq_u64(sse2neon_vmull_p64(vget_low_u64(P), vget_high_u64(foldConstants1)), mm_srli_si128(P, 8));
    }

    // Final Barrett reduction
    const uint64x2_t T1 = sse2neon_vmull_p64(vget_low_u64(R), vget_low_u64(foldConstants2));
    const uint64x2_t T2 = veorq_u64(
        veorq_u64(sse2neon_vmull_p64(vget_low_u64(T1), vget_high_u64(foldConstants2)), mm_slli_si128(T1, 8)), R);

    TRACE("Exiting aaruf_crc64_vmull()");

    return ~vgetq_lane_u64(T2, 1);
}

#endif
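
Usage sketch (not part of this file): the complement of previous_crc on entry and of the result on return suggests the usual convention of seeding with 0 and feeding the returned value back in when hashing a buffer in pieces; the exact contract should be confirmed against aaruformat.h. Under that assumption, and with the ARM build guard above satisfied, a minimal caller could look like the following, and both computations should print the CRC-64/XZ check value 995dc9bbdf1939fa.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <aaruformat.h>

int main(void)
{
    const uint8_t *msg = (const uint8_t *)"123456789"; // standard CRC check string
    const long     n   = 9;

    // One-shot computation over the whole buffer (assumed seed of 0).
    uint64_t crc = aaruf_crc64_vmull(0, msg, n);

    // Chunked computation: the previous result becomes previous_crc for the next call.
    uint64_t chained = aaruf_crc64_vmull(0, msg, 4);
    chained          = aaruf_crc64_vmull(chained, msg + 4, n - 4);

    printf("one-shot: %016llx\nchained:  %016llx\n", (unsigned long long)crc, (unsigned long long)chained);

    return 0;
}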
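
For cross-checking, note that p = (bitReflect(poly) << 1) | 1 = 0x92d8af2baf0e1e85 corresponds to the bit-reflected ECMA-182 polynomial 0xC96C5795D7870F42, i.e. the CRC-64/XZ parameterisation. Below is a bit-at-a-time scalar sketch of the same checksum (a hypothetical helper, not part of libaaruformat) mirroring the complement-on-entry / complement-on-return behaviour of aaruf_crc64_vmull; it is useful as a reference when validating the vector path.

#include <stddef.h>
#include <stdint.h>

// Bit-reflected ECMA-182 polynomial; shifting it left by one and setting bit 0 gives the constant p above.
#define CRC64_XZ_POLY 0xC96C5795D7870F42ULL

// Hypothetical scalar reference: reflected, one bit at a time, with the same
// complement of the incoming CRC and of the returned value as aaruf_crc64_vmull.
static uint64_t crc64_xz_reference(uint64_t previous_crc, const uint8_t *data, size_t len)
{
    uint64_t crc = ~previous_crc;

    for(size_t i = 0; i < len; i++)
    {
        crc ^= data[i];

        for(int bit = 0; bit < 8; bit++) crc = (crc & 1) ? (crc >> 1) ^ CRC64_XZ_POLY : crc >> 1;
    }

    return ~crc;
}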