Util: Reformat hashing and let it use 64-bit regions

Vicki Pfau 2021-07-23 17:56:30 -07:00
parent 6b189fe249
commit 0035b5a22b
2 changed files with 58 additions and 57 deletions

include/mgba-util/hash.h

@@ -10,7 +10,7 @@
 CXX_GUARD_START
 
-uint32_t hash32(const void* key, int len, uint32_t seed);
+uint32_t hash32(const void* key, size_t len, uint32_t seed);
 
 CXX_GUARD_END
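
The header change above is the API-visible half of the commit: len widens from int to size_t, so a single call can hash a memory region larger than INT_MAX bytes on a 64-bit platform. A minimal caller sketch, assuming the mgba-util include path; the buffer and size are made up for illustration:

#include <mgba-util/hash.h>

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
	// Hypothetical region; with size_t len, sizes past INT_MAX
	// (2 GiB) are now representable on 64-bit platforms.
	size_t len = (size_t) 1 << 20;
	uint8_t* region = calloc(len, 1);
	if (!region) {
		return 1;
	}
	printf("%08" PRIx32 "\n", hash32(region, len, 0));
	free(region);
	return 0;
}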

src/util/hash.c

@@ -15,11 +15,11 @@
 #define FORCE_INLINE inline __attribute__((always_inline))
 
-static inline uint32_t rotl32 ( uint32_t x, int8_t r ) {
-  return (x << r) | (x >> (32 - r));
+static inline uint32_t rotl32(uint32_t x, int8_t r) {
+	return (x << r) | (x >> (32 - r));
 }
 
-#define ROTL32(x,y) rotl32(x,y)
+#define ROTL32(x, y) rotl32(x, y)
 
 #endif
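
This hunk reformats what is presumably the non-MSVC branch of the ROTL32 definition (the original MurmurHash3 source pairs it with an _rotl-based branch under _MSC_VER). Behavior is unchanged: rotl32 is a plain 32-bit rotate-left. The (32 - r) shift would be undefined for r == 0, but the hash only ever rotates by 13 or 15. A standalone sanity check of the fallback; the test values are my own:

#include <assert.h>
#include <stdint.h>

// Copy of the fallback rotate from the diff above.
static inline uint32_t rotl32(uint32_t x, int8_t r) {
	return (x << r) | (x >> (32 - r));
}

int main(void) {
	assert(rotl32(0x80000000u, 1) == 1);            // High bit wraps to bit 0
	assert(rotl32(0x00000001u, 31) == 0x80000000u); // And back again
	assert(rotl32(0x12345678u, 16) == 0x56781234u); // Halves swap at r == 16
	return 0;
}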
@@ -27,81 +27,82 @@ static inline uint32_t rotl32 ( uint32_t x, int8_t r ) {
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-static FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) {
-  uint32_t ret;
-  LOAD_32LE(ret, i << 2, p);
-  return ret;
+static FORCE_INLINE uint32_t getblock32(const uint32_t* p, ssize_t i) {
+	uint32_t ret;
+	LOAD_32LE(ret, i << 2, p);
+	return ret;
 }
 
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
 
-static FORCE_INLINE uint32_t fmix32 (uint32_t h) {
-  h ^= h >> 16;
-  h *= 0x85ebca6b;
-  h ^= h >> 13;
-  h *= 0xc2b2ae35;
-  h ^= h >> 16;
+static FORCE_INLINE uint32_t fmix32(uint32_t h) {
+	h ^= h >> 16;
+	h *= 0x85ebca6b;
+	h ^= h >> 13;
+	h *= 0xc2b2ae35;
+	h ^= h >> 16;
 
-  return h;
+	return h;
 }
 
 //-----------------------------------------------------------------------------
 
-uint32_t hash32(const void* key, int len, uint32_t seed) {
-  const uint8_t * data = (const uint8_t*)key;
-  const int nblocks = len / 4;
+uint32_t hash32(const void* key, size_t len, uint32_t seed) {
+	const uint8_t* data = (const uint8_t*) key;
+	const int nblocks = len / 4;
 
-  uint32_t h1 = seed;
+	uint32_t h1 = seed;
 
-  const uint32_t c1 = 0xcc9e2d51;
-  const uint32_t c2 = 0x1b873593;
+	const uint32_t c1 = 0xcc9e2d51;
+	const uint32_t c2 = 0x1b873593;
 
-  //----------
-  // body
+	//----------
+	// body
 
-  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+	const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4);
 
-  int i;
-  for(i = -nblocks; i; i++)
-  {
-    uint32_t k1 = getblock32(blocks,i);
+	int i;
+	for (i = -nblocks; i; i++) {
+		uint32_t k1 = getblock32(blocks, i);
 
-    k1 *= c1;
-    k1 = ROTL32(k1,15);
-    k1 *= c2;
-    h1 ^= k1;
-    h1 = ROTL32(h1,13);
-    h1 = h1*5+0xe6546b64;
-  }
+		k1 *= c1;
+		k1 = ROTL32(k1, 15);
+		k1 *= c2;
+		h1 ^= k1;
+		h1 = ROTL32(h1, 13);
+		h1 = h1 * 5 + 0xe6546b64;
+	}
 
-  //----------
-  // tail
+	//----------
+	// tail
 
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+	const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
 
-  uint32_t k1 = 0;
+	uint32_t k1 = 0;
 
-  switch(len & 3)
-  {
-  case 3:
-    k1 ^= tail[2] << 16;
-    // Fall through
-  case 2:
-    k1 ^= tail[1] << 8;
-    // Fall through
-  case 1:
-    k1 ^= tail[0];
-    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
-  };
+	switch(len & 3) {
+	case 3:
+		k1 ^= tail[2] << 16;
+		// Fall through
+	case 2:
+		k1 ^= tail[1] << 8;
+		// Fall through
+	case 1:
+		k1 ^= tail[0];
+		k1 *= c1;
+		k1 = ROTL32(k1, 15);
+		k1 *= c2;
+		h1 ^= k1;
+	};
 
-  //----------
-  // finalization
+	//----------
+	// finalization
 
-  h1 ^= len;
+	h1 ^= len;
 
-  h1 = fmix32(h1);
+	h1 = fmix32(h1);
 
-  return h1;
+	return h1;
 }
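
Two details of the body are worth calling out. getblock32 reads each block through LOAD_32LE, so the digest comes out identical on big- and little-endian hosts; and the main loop indexes backward from the end of the block region with a negative i, which is why getblock32 now takes ssize_t rather than int. The sketch below pairs a byte-wise stand-in for that load (load32le is my own portable approximation, not mGBA's actual macro) with the finalizer from the diff, to show the avalanche behavior the fmix32 comment refers to:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical stand-in for LOAD_32LE: read a 32-bit little-endian
// value at byte offset off from base, independent of host endianness
// and alignment. Negative offsets index back from base, as the
// body loop does.
static uint32_t load32le(const void* base, ptrdiff_t off) {
	const uint8_t* p = (const uint8_t*) base + off;
	return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
	       ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

// Finalizer as in the diff: flipping one input bit flips roughly
// half of the 32 output bits.
static uint32_t fmix32(uint32_t h) {
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;
	return h;
}

int main(void) {
	const uint8_t bytes[] = { 0x78, 0x56, 0x34, 0x12 };
	printf("%08x\n", (unsigned) load32le(bytes, 0)); // 12345678 on any host

	uint32_t diff = fmix32(1) ^ fmix32(2);
	int flipped = 0;
	while (diff) {
		flipped += diff & 1;
		diff >>= 1;
	}
	printf("%d bits flipped\n", flipped); // Expect a count near 16
	return 0;
}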