Mirror of https://github.com/aaru-dps/Aaru.Checksums.Native.git (synced 2025-12-16 11:14:29 +00:00)
Refactor field names.
spamsum.c (128 changed lines)
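The renamed snake_case fields referenced throughout the diff below suggest a context laid out roughly as follows. This is an illustrative sketch only: the struct names, member ordering, array sizes and integer widths are assumptions; the authoritative declarations (including SPAMSUM_LENGTH, ROLLING_WINDOW, NUM_BLOCKHASHES and HASH_INIT) live in spamsum.h and are not reproduced here.

#include <stdint.h>

/* Hypothetical shapes implied by the new field names in this commit. */
typedef struct
{
    uint32_t h;                      /* blockhash state                      */
    uint32_t half_h;                 /* state for the half-blocksize digest  */
    uint8_t  digest[SPAMSUM_LENGTH]; /* emitted base64 signature characters  */
    uint8_t  half_digest;            /* single trailing half-blocksize char  */
    uint32_t d_len;                  /* characters written into digest       */
} blockhash_state;                   /* name is illustrative                 */

typedef struct
{
    uint32_t h1, h2, h3;             /* components summed by ROLL_SUM()      */
    uint8_t  window[ROLLING_WINDOW]; /* last ROLLING_WINDOW input bytes      */
    uint32_t n;                      /* count of bytes fed to roll_hash()    */
} rolling_state;                     /* name is illustrative                 */

typedef struct
{
    blockhash_state bh[NUM_BLOCKHASHES];
    uint32_t        bh_start, bh_end; /* active blockhash range              */
    uint64_t        total_size;       /* total bytes hashed                  */
    rolling_state   roll;
} spamsum_ctx;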
@@ -25,7 +25,7 @@

 #include "spamsum.h"

-static uint8_t _b64[] = {0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+static uint8_t b64[] = {0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A,
 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
@@ -38,9 +38,9 @@ AARU_EXPORT spamsum_ctx* AARU_CALL spamsum_init(void)

 memset(ctx, 0, sizeof(spamsum_ctx));

-ctx->Bhend = 1;
-ctx->Bh[0].H = HASH_INIT;
-ctx->Bh[0].Halfh = HASH_INIT;
+ctx->bh_end = 1;
+ctx->bh[0].h = HASH_INIT;
+ctx->bh[0].half_h = HASH_INIT;

 return ctx;
 }
@@ -51,7 +51,7 @@ AARU_EXPORT int AARU_CALL spamsum_update(spamsum_ctx* ctx, const uint8_t* data,

 for(int i = 0; i < len; i++) fuzzy_engine_step(ctx, data[i]);

-ctx->TotalSize += len;
+ctx->total_size += len;

 return 0;
 }
@@ -61,8 +61,8 @@ AARU_EXPORT void AARU_CALL spamsum_free(spamsum_ctx* ctx)
 if(ctx) free(ctx);
 }

-#define roll_sum(ctx) (ctx->Roll.H1 + ctx->Roll.H2 + ctx->Roll.H3)
-#define sum_hash(c, h) ((h * HASH_PRIME) ^ c);
+#define ROLL_SUM(ctx) (ctx->roll.h1 + ctx->roll.h2 + ctx->roll.h3)
+#define SUM_HASH(c, h) ((h * HASH_PRIME) ^ c);
 #define SSDEEP_BS(index) (MIN_BLOCKSIZE << index)

 AARU_LOCAL void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)
@@ -72,15 +72,15 @@ AARU_LOCAL void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)
 * When the rolling hash hits a reset value then we emit a normal hash
 * as a element of the signature and reset the normal hash. */
 roll_hash(ctx, c);
-uint64_t h = roll_sum(ctx);
+uint64_t h = ROLL_SUM(ctx);

-for(i = ctx->Bhstart; i < ctx->Bhend; ++i)
+for(i = ctx->bh_start; i < ctx->bh_end; ++i)
 {
-ctx->Bh[i].H = sum_hash(c, ctx->Bh[i].H);
-ctx->Bh[i].Halfh = sum_hash(c, ctx->Bh[i].Halfh);
+ctx->bh[i].h = SUM_HASH(c, ctx->bh[i].h);
+ctx->bh[i].half_h = SUM_HASH(c, ctx->bh[i].half_h);
 }

-for(i = ctx->Bhstart; i < ctx->Bhend; ++i)
+for(i = ctx->bh_start; i < ctx->bh_end; ++i)
 {
 /* With growing blocksize almost no runs fail the next test. */
 if(h % SSDEEP_BS(i) != SSDEEP_BS(i) - 1)
@@ -92,12 +92,12 @@ AARU_LOCAL void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)
 /* We have hit a reset point. We now emit hashes which are
 * based on all characters in the piece of the message between
 * the last reset point and this one */
-if(0 == ctx->Bh[i].Dlen) fuzzy_try_fork_blockhash(ctx);
+if(0 == ctx->bh[i].d_len) fuzzy_try_fork_blockhash(ctx);

-ctx->Bh[i].Digest[ctx->Bh[i].Dlen] = _b64[ctx->Bh[i].H % 64];
-ctx->Bh[i].Halfdigest = _b64[ctx->Bh[i].Halfh % 64];
+ctx->bh[i].digest[ctx->bh[i].d_len] = b64[ctx->bh[i].h % 64];
+ctx->bh[i].half_digest = b64[ctx->bh[i].half_h % 64];

-if(ctx->Bh[i].Dlen < SPAMSUM_LENGTH - 1)
+if(ctx->bh[i].d_len < SPAMSUM_LENGTH - 1)
 {
 /* We can have a problem with the tail overflowing. The
 * easiest way to cope with this is to only reset the
@@ -105,13 +105,13 @@ AARU_LOCAL void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)
 * our signature. This has the effect of combining the
 * last few pieces of the message into a single piece
 * */
-ctx->Bh[i].Digest[++ctx->Bh[i].Dlen] = 0;
-ctx->Bh[i].H = HASH_INIT;
+ctx->bh[i].digest[++ctx->bh[i].d_len] = 0;
+ctx->bh[i].h = HASH_INIT;

-if(ctx->Bh[i].Dlen >= SPAMSUM_LENGTH / 2) continue;
+if(ctx->bh[i].d_len >= SPAMSUM_LENGTH / 2) continue;

-ctx->Bh[i].Halfh = HASH_INIT;
-ctx->Bh[i].Halfdigest = 0;
+ctx->bh[i].half_h = HASH_INIT;
+ctx->bh[i].half_digest = 0;
 }
 else
 fuzzy_try_reduce_blockhash(ctx);
@@ -120,72 +120,72 @@ AARU_LOCAL void fuzzy_engine_step(spamsum_ctx* ctx, uint8_t c)

 AARU_LOCAL void roll_hash(spamsum_ctx* ctx, uint8_t c)
 {
-ctx->Roll.H2 -= ctx->Roll.H1;
-ctx->Roll.H2 += ROLLING_WINDOW * c;
+ctx->roll.h2 -= ctx->roll.h1;
+ctx->roll.h2 += ROLLING_WINDOW * c;

-ctx->Roll.H1 += c;
-ctx->Roll.H1 -= ctx->Roll.Window[ctx->Roll.N % ROLLING_WINDOW];
+ctx->roll.h1 += c;
+ctx->roll.h1 -= ctx->roll.window[ctx->roll.n % ROLLING_WINDOW];

-ctx->Roll.Window[ctx->Roll.N % ROLLING_WINDOW] = c;
-ctx->Roll.N++;
+ctx->roll.window[ctx->roll.n % ROLLING_WINDOW] = c;
+ctx->roll.n++;

 /* The original spamsum AND'ed this value with 0xFFFFFFFF which
 * in theory should have no effect. This AND has been removed
 * for performance (jk) */
-ctx->Roll.H3 <<= 5;
-ctx->Roll.H3 ^= c;
+ctx->roll.h3 <<= 5;
+ctx->roll.h3 ^= c;
 }

 AARU_LOCAL void fuzzy_try_reduce_blockhash(spamsum_ctx* ctx)
 {
-assert(ctx->Bhstart < ctx->Bhend);
+assert(ctx->bh_start < ctx->bh_end);

-if(ctx->Bhend - ctx->Bhstart < 2) /* Need at least two working hashes. */
+if(ctx->bh_end - ctx->bh_start < 2) /* Need at least two working hashes. */
 return;

-if((uint64_t)SSDEEP_BS(ctx->Bhstart) * SPAMSUM_LENGTH >= ctx->TotalSize)
+if((uint64_t)SSDEEP_BS(ctx->bh_start) * SPAMSUM_LENGTH >= ctx->total_size)
 /* Initial blocksize estimate would select this or a smaller
 * blocksize. */
 return;

-if(ctx->Bh[ctx->Bhstart + 1].Dlen < SPAMSUM_LENGTH / 2) /* Estimate adjustment would select this blocksize. */
+if(ctx->bh[ctx->bh_start + 1].d_len < SPAMSUM_LENGTH / 2) /* Estimate adjustment would select this blocksize. */
 return;

 /* At this point we are clearly no longer interested in the
 * start_blocksize. Get rid of it. */
-++ctx->Bhstart;
+++ctx->bh_start;
 }

 AARU_LOCAL void fuzzy_try_fork_blockhash(spamsum_ctx* ctx)
 {
-if(ctx->Bhend >= NUM_BLOCKHASHES) return;
+if(ctx->bh_end >= NUM_BLOCKHASHES) return;

-assert(ctx->Bhend != 0);
+assert(ctx->bh_end != 0);

-uint32_t obh = ctx->Bhend - 1;
-uint32_t nbh = ctx->Bhend;
-ctx->Bh[nbh].H = ctx->Bh[obh].H;
-ctx->Bh[nbh].Halfh = ctx->Bh[obh].Halfh;
-ctx->Bh[nbh].Digest[0] = 0;
-ctx->Bh[nbh].Halfdigest = 0;
-ctx->Bh[nbh].Dlen = 0;
-++ctx->Bhend;
+uint32_t obh = ctx->bh_end - 1;
+uint32_t nbh = ctx->bh_end;
+ctx->bh[nbh].h = ctx->bh[obh].h;
+ctx->bh[nbh].half_h = ctx->bh[obh].half_h;
+ctx->bh[nbh].digest[0] = 0;
+ctx->bh[nbh].half_digest = 0;
+ctx->bh[nbh].d_len = 0;
+++ctx->bh_end;
 }

 AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 {
-uint32_t bi = ctx->Bhstart;
-uint32_t h = roll_sum(ctx);
+uint32_t bi = ctx->bh_start;
+uint32_t h = ROLL_SUM(ctx);
 int remain = (int)(FUZZY_MAX_RESULT - 1); /* Exclude terminating '\0'. */
 uint8_t* result = (uint8_t*)malloc(FUZZY_MAX_RESULT);

 if(!result) return NULL;

 /* Verify that our elimination was not overeager. */
-assert(bi == 0 || (uint64_t)SSDEEP_BS(bi) / 2 * SPAMSUM_LENGTH < ctx->TotalSize);
+assert(bi == 0 || (uint64_t)SSDEEP_BS(bi) / 2 * SPAMSUM_LENGTH < ctx->total_size);

 /* Initial blocksize guess. */
-while((uint64_t)SSDEEP_BS(bi) * SPAMSUM_LENGTH < ctx->TotalSize)
+while((uint64_t)SSDEEP_BS(bi) * SPAMSUM_LENGTH < ctx->total_size)
 {
 ++bi;

@@ -197,11 +197,11 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 }

 /* Adapt blocksize guess to actual digest length. */
-while(bi >= ctx->Bhend) --bi;
+while(bi >= ctx->bh_end) --bi;

-while(bi > ctx->Bhstart && ctx->Bh[bi].Dlen < SPAMSUM_LENGTH / 2) --bi;
+while(bi > ctx->bh_start && ctx->bh[bi].d_len < SPAMSUM_LENGTH / 2) --bi;

-assert(!(bi > 0 && ctx->Bh[bi].Dlen < SPAMSUM_LENGTH / 2));
+assert(!(bi > 0 && ctx->bh[bi].d_len < SPAMSUM_LENGTH / 2));

 int i = snprintf((char*)result, (size_t)remain, "%lu:", (unsigned long)SSDEEP_BS(bi));

@@ -213,11 +213,11 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 remain -= i;
 result += i;

-i = (int)ctx->Bh[bi].Dlen;
+i = (int)ctx->bh[bi].d_len;

 assert(i <= remain);

-memcpy(result, ctx->Bh[bi].Digest, (size_t)i);
+memcpy(result, ctx->bh[bi].digest, (size_t)i);
 result += i;
 remain -= i;

@@ -225,7 +225,7 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 {
 assert(remain > 0);

-*result = _b64[ctx->Bh[bi].H % 64];
+*result = b64[ctx->bh[bi].h % 64];

 if(i < 3 || *result != result[-1] || *result != result[-2] || *result != result[-3])
 {
@@ -233,11 +233,11 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 --remain;
 }
 }
-else if(ctx->Bh[bi].Digest[i] != 0)
+else if(ctx->bh[bi].digest[i] != 0)
 {
 assert(remain > 0);

-*result = ctx->Bh[bi].Digest[i];
+*result = ctx->bh[bi].digest[i];

 if(i < 3 || *result != result[-1] || *result != result[-2] || *result != result[-3])
 {
@@ -251,15 +251,15 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 *result++ = ':';
 --remain;

-if(bi < ctx->Bhend - 1)
+if(bi < ctx->bh_end - 1)
 {
 ++bi;
-i = (int)ctx->Bh[bi].Dlen;
+i = (int)ctx->bh[bi].d_len;

 if(i <= remain)
 ;

-memcpy(result, ctx->Bh[bi].Digest, (size_t)i);
+memcpy(result, ctx->bh[bi].digest, (size_t)i);
 result += i;
 remain -= i;

@@ -267,8 +267,8 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 {
 assert(remain > 0);

-h = ctx->Bh[bi].Halfh;
-*result = _b64[h % 64];
+h = ctx->bh[bi].half_h;
+*result = b64[h % 64];

 if(i < 3 || *result != result[-1] || *result != result[-2] || *result != result[-3])
 {
@@ -278,7 +278,7 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 }
 else
 {
-i = ctx->Bh[bi].Halfdigest;
+i = ctx->bh[bi].half_digest;

 if(i != 0)
 {
@@ -296,11 +296,11 @@ AARU_EXPORT uint8_t* AARU_CALL spamsum_final(spamsum_ctx* ctx)
 }
 else if(h != 0)
 {
-assert(ctx->Bh[bi].Dlen == 0);
+assert(ctx->bh[bi].d_len == 0);

 assert(remain > 0);

-*result++ = _b64[ctx->Bh[bi].H % 64];
+*result++ = b64[ctx->bh[bi].h % 64];
 /* No need to bother with FUZZY_FLAG_ELIMSEQ, because this
 * digest has length 1. */
 --remain;
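For context, a minimal calling sketch for the exported functions touched by this diff (spamsum_init, spamsum_update, spamsum_final, spamsum_free). The type of the length parameter to spamsum_update and the behaviour assumed for spamsum_final (it appears to return a malloc'ed, NUL-terminated "blocksize:digest:half_digest" signature that the caller frees, leaving the context valid) are inferred from the code above, not a documented contract.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "spamsum.h"

int main(void)
{
    const char* msg = "The quick brown fox jumps over the lazy dog";

    spamsum_ctx* ctx = spamsum_init();
    if(!ctx) return EXIT_FAILURE;

    /* Streaming interface: update may be called once or repeatedly. */
    spamsum_update(ctx, (const uint8_t*)msg, (uint32_t)strlen(msg));

    /* Assumed: spamsum_final() allocates the signature; caller frees it. */
    uint8_t* signature = spamsum_final(ctx);
    if(signature)
    {
        printf("%s\n", signature);
        free(signature);
    }

    spamsum_free(ctx);
    return EXIT_SUCCESS;
}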