#include "rbtdb.h"

/*
 * https://sites.google.com/site/murmurhash/
 *
 * MurmurHash3 was written by Austin Appleby, and is placed in the public
 * domain. The author hereby disclaims copyright to this source code.
 *
 * Eric Wong trivially ported this to C for Ruby tdb (32-bit versions only)
 */

#include <stdint.h>
#include <string.h>

static inline uint32_t rotl32(uint32_t x, int8_t r)
{
	return (x << r) | (x >> (32 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define BIG_CONSTANT(x) (x##LLU)

/* ----------------------------------------------------------------------------
 * Block read - if your platform needs to do endian-swapping or can only
 * handle aligned reads, do the conversion here
 *
 * memcpy avoids the strict-aliasing and misaligned-access UB of casting the
 * byte pointer to uint32_t*; on x86/x86_64 it compiles to a single load and
 * produces the same (native-endian) value as the original pointer cast.
 */
static inline uint32_t getblock(const uint8_t * p, int i)
{
	uint32_t v;

	memcpy(&v, p + (size_t)i * sizeof(v), sizeof(v));
	return v;
}

/* ----------------------------------------------------------------------------
 * Finalization mix - force all bits of a hash block to avalanche
 */
static inline uint32_t fmix(uint32_t h)
{
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;

	return h;
}

/*
 * MurmurHash3_x86_32 over key->dptr[0..key->dsize), seed fixed at 0.
 * Returns the full 32-bit hash as unsigned int for use as a tdb hash fn.
 */
unsigned int rbtdb_murmur3a(TDB_DATA * key)
{
	const uint8_t *data = key->dptr;
	int len = (int)key->dsize;
	const int nblocks = len / 4;
	const uint32_t seed = 0;	/* tdb always hashes with seed 0 */
	uint32_t h1 = seed;
	int i;
	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	/* body: mix one aligned 4-byte block at a time */
	for (i = 0; i < nblocks; i++) {
		uint32_t k1 = getblock(data, i);

		k1 *= c1;
		k1 = ROTL32(k1, 15);
		k1 *= c2;

		h1 ^= k1;
		h1 = ROTL32(h1, 13);
		h1 = h1 * 5 + 0xe6546b64;
	}

	/* tail: fold in the 0-3 leftover bytes */
	{
		const uint8_t *tail = data + nblocks * 4;
		uint32_t k1 = 0;

		switch (len & 3) {
		case 3:
			k1 ^= tail[2] << 16;
			/* fallthrough */
		case 2:
			k1 ^= tail[1] << 8;
			/* fallthrough */
		case 1:
			k1 ^= tail[0];
			k1 *= c1;
			k1 = ROTL32(k1, 15);
			k1 *= c2;
			h1 ^= k1;
		};
	}

	/* finalization */
	h1 ^= len;

	return fmix(h1);
}

static inline uint64_t rotl64(uint64_t x, int8_t r)
{
	return (x << r) | (x >> (64 - r));
}

#define ROTL64(x,y) rotl64(x,y)

/* see getblock() above for why memcpy rather than a pointer cast */
static inline uint64_t getblock64(const uint8_t * p, int i)
{
	uint64_t v;

	memcpy(&v, p + (size_t)i * sizeof(v), sizeof(v));
	return v;
}

static inline uint64_t fmix64(uint64_t k)
{
	k ^= k >> 33;
	k *= BIG_CONSTANT(0xff51afd7ed558ccd);
	k ^= k >> 33;
	k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
	k ^= k >> 33;

	return k;
}

/*
 * MurmurHash3_x64_128 over key->dptr[0..key->dsize), seed fixed at 0,
 * truncated to the low 32 bits of h1 (this was a 128-bit hash for x86_64,
 * but we only want 32-bits).
 */
unsigned int rbtdb_murmur3f(TDB_DATA * key)
{
	const uint8_t *data = key->dptr;
	int len = (int)key->dsize;
	const int nblocks = len / 16;
	const uint32_t seed = 0;	/* tdb always hashes with seed 0 */
	uint64_t h1 = seed;
	uint64_t h2 = seed;
	uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
	uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
	int i;

	/* body: mix one 16-byte block (two 64-bit lanes) at a time */
	for (i = 0; i < nblocks; i++) {
		uint64_t k1 = getblock64(data, i * 2 + 0);
		uint64_t k2 = getblock64(data, i * 2 + 1);

		k1 *= c1;
		k1 = ROTL64(k1, 31);
		k1 *= c2;
		h1 ^= k1;

		h1 = ROTL64(h1, 27);
		h1 += h2;
		h1 = h1 * 5 + 0x52dce729;

		k2 *= c2;
		k2 = ROTL64(k2, 33);
		k2 *= c1;
		h2 ^= k2;

		h2 = ROTL64(h2, 31);
		h2 += h1;
		h2 = h2 * 5 + 0x38495ab5;
	}

	/* tail: fold in the 0-15 leftover bytes */
	{
		const uint8_t *tail = data + nblocks * 16;
		uint64_t k1 = 0;
		uint64_t k2 = 0;

#define CAST64(x) ((uint64_t)(x))
		switch (len & 15) {
		case 15:
			k2 ^= CAST64(tail[14]) << 48;
			/* fallthrough */
		case 14:
			k2 ^= CAST64(tail[13]) << 40;
			/* fallthrough */
		case 13:
			k2 ^= CAST64(tail[12]) << 32;
			/* fallthrough */
		case 12:
			k2 ^= CAST64(tail[11]) << 24;
			/* fallthrough */
		case 11:
			k2 ^= CAST64(tail[10]) << 16;
			/* fallthrough */
		case 10:
			k2 ^= CAST64(tail[9]) << 8;
			/* fallthrough */
		case 9:
			k2 ^= CAST64(tail[8]) << 0;
			k2 *= c2;
			k2 = ROTL64(k2, 33);
			k2 *= c1;
			h2 ^= k2;
			/* fallthrough */
		case 8:
			k1 ^= CAST64(tail[7]) << 56;
			/* fallthrough */
		case 7:
			k1 ^= CAST64(tail[6]) << 48;
			/* fallthrough */
		case 6:
			k1 ^= CAST64(tail[5]) << 40;
			/* fallthrough */
		case 5:
			k1 ^= CAST64(tail[4]) << 32;
			/* fallthrough */
		case 4:
			k1 ^= CAST64(tail[3]) << 24;
			/* fallthrough */
		case 3:
			k1 ^= CAST64(tail[2]) << 16;
			/* fallthrough */
		case 2:
			k1 ^= CAST64(tail[1]) << 8;
			/* fallthrough */
		case 1:
			k1 ^= CAST64(tail[0]) << 0;
			k1 *= c1;
			k1 = ROTL64(k1, 31);
			k1 *= c2;
			h1 ^= k1;
		};
	}

	/* finalization */
	h1 ^= len;
	h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = fmix64(h1);
	h2 = fmix64(h2);

	h1 += h2;
	/* not needed for 32-bit hash */
	/* h2 += h1; */

	return (unsigned int)h1;	/* truncate to 32-bits */
}