|
| 1 | +package murmur3 |
| 2 | + |
| 3 | +import ( |
| 4 | + //"encoding/binary" |
| 5 | + "hash" |
| 6 | + "unsafe" |
| 7 | +) |
| 8 | + |
const (
	// Block-mixing multiplication constants taken verbatim from the
	// reference MurmurHash3_x64_128 implementation (smhasher).
	c1_128 = 0x87c37b91114253d5
	c2_128 = 0x4cf5ad432745937f
)
| 13 | + |
| 14 | +// Make sure interfaces are correctly implemented. |
| 15 | +var ( |
| 16 | + _ hash.Hash = new(digest128) |
| 17 | + _ Hash128 = new(digest128) |
| 18 | + _ bmixer = new(digest128) |
| 19 | +) |
| 20 | + |
// Hash128 is hash.Hash extended with a Sum128 method that returns the
// 128-bit digest as two uint64 halves (high-order first, matching the
// byte order Sum emits). Hack: the standard library api doesn't define
// any Hash128 interface, so we declare our own.
type Hash128 interface {
	hash.Hash
	Sum128() (uint64, uint64)
}
| 26 | + |
// digest128 represents a partial evaluation of a 128-bit hash.
type digest128 struct {
	digest // shared buffering/length bookkeeping — declared elsewhere in this package
	h1 uint64 // Unfinalized running hash part 1.
	h2 uint64 // Unfinalized running hash part 2.
}
| 33 | + |
| 34 | +func New128() Hash128 { |
| 35 | + d := new(digest128) |
| 36 | + d.bmixer = d |
| 37 | + d.Reset() |
| 38 | + return d |
| 39 | +} |
| 40 | + |
| 41 | +func (d *digest128) Size() int { return 16 } |
| 42 | + |
| 43 | +func (d *digest128) reset() { d.h1, d.h2 = 1, 1 } |
| 44 | + |
| 45 | +func (d *digest128) Sum(b []byte) []byte { |
| 46 | + h1, h2 := d.h1, d.h2 |
| 47 | + return append(b, |
| 48 | + byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), |
| 49 | + byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1), |
| 50 | + |
| 51 | + byte(h2>>56), byte(h2>>48), byte(h2>>40), byte(h2>>32), |
| 52 | + byte(h2>>24), byte(h2>>16), byte(h2>>8), byte(h2), |
| 53 | + ) |
| 54 | +} |
| 55 | + |
// bmix folds every complete 16-byte block of p into the running state
// (h1, h2) and returns the unconsumed remainder (< 16 bytes), which the
// embedded digest buffers as the tail for finalization in Sum128.
//
// NOTE(review): the unsafe cast below reads the two words in native
// byte order and assumes &p[i*16] may be dereferenced as a uint64 pair.
// On big-endian CPUs this would produce wrong hashes, and on
// alignment-strict architectures it could fault — confirm the intended
// targets, or switch to encoding/binary (hinted at by the commented-out
// import).
func (d *digest128) bmix(p []byte) (tail []byte) {
	h1, h2 := d.h1, d.h2

	nblocks := len(p) / 16
	for i := 0; i < nblocks; i++ {
		// Reinterpret the next 16 bytes as two native-endian uint64s.
		t := (*[2]uint64)(unsafe.Pointer(&p[i*16]))
		k1, k2 := t[0], t[1]

		k1 *= c1_128
		k1 = (k1 << 31) | (k1 >> 33) // rotl64(k1, 31)
		k1 *= c2_128
		h1 ^= k1

		h1 = (h1 << 27) | (h1 >> 37) // rotl64(h1, 27)
		h1 += h2
		h1 = h1*5 + 0x52dce729

		k2 *= c2_128
		k2 = (k2 << 33) | (k2 >> 31) // rotl64(k2, 33)
		k2 *= c1_128
		h2 ^= k2

		h2 = (h2 << 31) | (h2 >> 33) // rotl64(h2, 31)
		h2 += h1
		h2 = h2*5 + 0x38495ab5
	}
	d.h1, d.h2 = h1, h2
	// d.Size() == 16, so this slices off exactly the blocks consumed above.
	return p[nblocks*d.Size():]
}
| 85 | + |
// Sum128 finalizes and returns the 128-bit hash of the data written so
// far, without disturbing the running state (h1/h2 are finalized in
// locals, so more Writes may follow).
//
// Finalization follows the reference MurmurHash3_x64_128: mix in the
// buffered tail (up to 15 bytes), xor in the total length, then run
// the cross-add / fmix64 avalanche.
func (d *digest128) Sum128() (h1, h2 uint64) {

	h1, h2 = d.h1, d.h2

	// Tail handling: the fallthrough chain accumulates the remaining
	// bytes little-endian into k2 (bytes 8..14) and k1 (bytes 0..7).
	// Cases 15..9 build and mix k2, then fall through to 8..1 which
	// build and mix k1 — exact order matters.
	var k1, k2 uint64
	switch len(d.tail) & 15 {
	case 15:
		k2 ^= uint64(d.tail[14]) << 48
		fallthrough
	case 14:
		k2 ^= uint64(d.tail[13]) << 40
		fallthrough
	case 13:
		k2 ^= uint64(d.tail[12]) << 32
		fallthrough
	case 12:
		k2 ^= uint64(d.tail[11]) << 24
		fallthrough
	case 11:
		k2 ^= uint64(d.tail[10]) << 16
		fallthrough
	case 10:
		k2 ^= uint64(d.tail[9]) << 8
		fallthrough
	case 9:
		k2 ^= uint64(d.tail[8]) << 0

		k2 *= c2_128
		k2 = (k2 << 33) | (k2 >> 31) // rotl64(k2, 33)
		k2 *= c1_128
		h2 ^= k2

		fallthrough

	case 8:
		k1 ^= uint64(d.tail[7]) << 56
		fallthrough
	case 7:
		k1 ^= uint64(d.tail[6]) << 48
		fallthrough
	case 6:
		k1 ^= uint64(d.tail[5]) << 40
		fallthrough
	case 5:
		k1 ^= uint64(d.tail[4]) << 32
		fallthrough
	case 4:
		k1 ^= uint64(d.tail[3]) << 24
		fallthrough
	case 3:
		k1 ^= uint64(d.tail[2]) << 16
		fallthrough
	case 2:
		k1 ^= uint64(d.tail[1]) << 8
		fallthrough
	case 1:
		k1 ^= uint64(d.tail[0]) << 0
		k1 *= c1_128
		k1 = (k1 << 31) | (k1 >> 33) // rotl64(k1, 31)
		k1 *= c2_128
		h1 ^= k1
	}

	// Fold in the total byte count, then avalanche.
	h1 ^= uint64(d.clen)
	h2 ^= uint64(d.clen)

	h1 += h2
	h2 += h1

	h1 = fmix64(h1)
	h2 = fmix64(h2)

	h1 += h2
	h2 += h1

	return h1, h2
}
| 163 | + |
// fmix64 is murmur3's 64-bit finalization mix: alternating xor-shift
// and multiply steps that avalanche every input bit across the whole
// word. Note fmix64(0) == 0.
func fmix64(x uint64) uint64 {
	const (
		m1 = 0xff51afd7ed558ccd
		m2 = 0xc4ceb9fe1a85ec53
	)
	x ^= x >> 33
	x *= m1
	x ^= x >> 33
	x *= m2
	x ^= x >> 33
	return x
}
| 172 | + |
| 173 | +/* |
| 174 | +func rotl64(x uint64, r byte) uint64 { |
| 175 | + return (x << r) | (x >> (64 - r)) |
| 176 | +} |
| 177 | +*/ |
0 commit comments