// Copyright (C) 2012-2014 Jean-Philippe Aumasson
// Copyright (C) 2012-2014 Daniel J. Bernstein <djb@cr.yp.to>
// Copyright (C) 2016 Intel Corporation.
// SPDX-License-Identifier: CC0-1.0

#include <QtCore/qassert.h>
#include <QtCore/qcompilerdetection.h>
#include <QtCore/qendian.h>

#ifdef Q_CC_GNU
#  define DECL_HOT_FUNCTION __attribute__((hot))
#else
#  define DECL_HOT_FUNCTION
#endif

#include <cstdint>

QT_USE_NAMESPACE

namespace {

// This is an inlined version of the SipHash implementation that avoids
// some memcpys from uint64 to uint8[] and back.

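// Rotate x left by b bits; using sizeof(x) * 8 for the word width lets the
// same macro serve the 64-bit state words here and the 32-bit ones below.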
#define ROTL(x, b) (((x) << (b)) | ((x) >> (sizeof(x) * 8 - (b))))

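// One SipHash mixing round over the four state words, as in the reference
// implementation; it is applied cROUNDS times per message block and dROUNDS
// times during finalization.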
#define SIPROUND \
    do { \
        v0 += v1; \
        v1 = ROTL(v1, 13); \
        v1 ^= v0; \
        v0 = ROTL(v0, 32); \
        v2 += v3; \
        v3 = ROTL(v3, 16); \
        v3 ^= v2; \
        v0 += v3; \
        v3 = ROTL(v3, 21); \
        v3 ^= v0; \
        v2 += v1; \
        v1 = ROTL(v1, 17); \
        v1 ^= v2; \
        v2 = ROTL(v2, 32); \
    } while (0)

template <int cROUNDS = 2, int dROUNDS = 4> struct SipHash64
{
    /* "somepseudorandomlygeneratedbytes" */
    uint64_t v0 = 0x736f6d6570736575ULL;
    uint64_t v1 = 0x646f72616e646f6dULL;
    uint64_t v2 = 0x6c7967656e657261ULL;
    uint64_t v3 = 0x7465646279746573ULL;
    uint64_t b;
    uint64_t k0;
    uint64_t k1;

    inline SipHash64(uint64_t fulllen, uint64_t seed, uint64_t seed2);
    inline void addBlock(const uint8_t *in, size_t inlen);
    inline uint64_t finalize(const uint8_t *in, size_t left);
};
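
// Typical use of this helper, mirroring the siphash() wrapper at the bottom of
// this file: construct with the total input length and the two seed words, feed
// all complete 8-byte blocks, then finalize with the 0-7 remaining tail bytes.
// A sketch (p and len are placeholders for the caller's buffer and length):
//
//     SipHash64<> h(len, seed, seed2);
//     h.addBlock(p, len & ~size_t(7));
//     uint64_t hash = h.finalize(p + (len & ~size_t(7)), len & 7);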

template <int cROUNDS, int dROUNDS>
SipHash64<cROUNDS, dROUNDS>::SipHash64(uint64_t inlen, uint64_t seed, uint64_t seed2)
{
    b = inlen << 56;
    k0 = seed;
    k1 = seed2;
    v3 ^= k1;
    v2 ^= k0;
    v1 ^= k1;
    v0 ^= k0;
}

template <int cROUNDS, int dROUNDS> DECL_HOT_FUNCTION void
SipHash64<cROUNDS, dROUNDS>::addBlock(const uint8_t *in, size_t inlen)
{
    Q_ASSERT((inlen & 7ULL) == 0);
    int i;
    const uint8_t *end = in + inlen;
    for (; in != end; in += 8) {
        uint64_t m = qFromUnaligned<uint64_t>(in);
        v3 ^= m;

        for (i = 0; i < cROUNDS; ++i)
            SIPROUND;

        v0 ^= m;
    }
}

template <int cROUNDS, int dROUNDS> DECL_HOT_FUNCTION uint64_t
SipHash64<cROUNDS, dROUNDS>::finalize(const uint8_t *in, size_t left)
{
    int i;
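    // Pack the 0-7 remaining tail bytes into the low bits of b; its top byte
    // already holds the low byte of the total input length (set in the
    // constructor), as the SipHash padding rule requires.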
    switch (left) {
    case 7:
        b |= ((uint64_t)in[6]) << 48;
        Q_FALLTHROUGH();
    case 6:
        b |= ((uint64_t)in[5]) << 40;
        Q_FALLTHROUGH();
    case 5:
        b |= ((uint64_t)in[4]) << 32;
        Q_FALLTHROUGH();
    case 4:
        b |= ((uint64_t)in[3]) << 24;
        Q_FALLTHROUGH();
    case 3:
        b |= ((uint64_t)in[2]) << 16;
        Q_FALLTHROUGH();
    case 2:
        b |= ((uint64_t)in[1]) << 8;
        Q_FALLTHROUGH();
    case 1:
        b |= ((uint64_t)in[0]);
        break;
    case 0:
        break;
    }

    v3 ^= b;

    for (i = 0; i < cROUNDS; ++i)
        SIPROUND;

    v0 ^= b;

    v2 ^= 0xff;

    for (i = 0; i < dROUNDS; ++i)
        SIPROUND;

    b = v0 ^ v1 ^ v2 ^ v3;
    return b;
}
#undef SIPROUND

// This is a "SipHash" implementation adapted for 32-bit platforms. It performs
// essentially the same operations as the 64-bit version, using 4 bytes at a
// time instead of 8.
//
// To make this work, we also need to change the rotation constants passed to
// ROTL: we simply use half of the 64-bit constants, rounded up for odd numbers.
//
// For the v0-v3 constants, we use the first four bytes of the 64-bit versions.
//
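// For example, the 64-bit round's rotations 13, 32, 16, 21, 17, 32 become
// 7, 16, 8, 11, 9, 16 in the 32-bit round below, and the 64-bit
// v0 = 0x736f6d6570736575 becomes v0 = 0x736f6d65.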

#define SIPROUND \
    do { \
        v0 += v1; \
        v1 = ROTL(v1, 7); \
        v1 ^= v0; \
        v0 = ROTL(v0, 16); \
        v2 += v3; \
        v3 = ROTL(v3, 8); \
        v3 ^= v2; \
        v0 += v3; \
        v3 = ROTL(v3, 11); \
        v3 ^= v0; \
        v2 += v1; \
        v1 = ROTL(v1, 9); \
        v1 ^= v2; \
        v2 = ROTL(v2, 16); \
    } while (0)

template <int cROUNDS = 2, int dROUNDS = 4> struct SipHash32
{
    /* "somepseudorandomlygeneratedbytes" */
    uint v0 = 0x736f6d65U;
    uint v1 = 0x646f7261U;
    uint v2 = 0x6c796765U;
    uint v3 = 0x74656462U;
    uint b;
    uint k0;
    uint k1;

    inline SipHash32(size_t fulllen, uint seed, uint seed2);
    inline void addBlock(const uint8_t *in, size_t inlen);
    inline uint finalize(const uint8_t *in, size_t left);
};

template <int cROUNDS, int dROUNDS> inline
SipHash32<cROUNDS, dROUNDS>::SipHash32(size_t inlen, uint seed, uint seed2)
{
    k0 = seed;
    k1 = seed2;
    b = inlen << 24;
    v3 ^= k1;
    v2 ^= k0;
    v1 ^= k1;
    v0 ^= k0;
}

template <int cROUNDS, int dROUNDS> inline DECL_HOT_FUNCTION void
SipHash32<cROUNDS, dROUNDS>::addBlock(const uint8_t *in, size_t inlen)
{
    Q_ASSERT((inlen & 3ULL) == 0);
    int i;
    const uint8_t *end = in + inlen;
    for (; in != end; in += 4) {
        uint m = qFromUnaligned<uint>(in);
        v3 ^= m;

        for (i = 0; i < cROUNDS; ++i)
            SIPROUND;

        v0 ^= m;
    }
}

template <int cROUNDS, int dROUNDS> inline DECL_HOT_FUNCTION uint
SipHash32<cROUNDS, dROUNDS>::finalize(const uint8_t *in, size_t left)
{
    int i;
    switch (left) {
    case 3:
        b |= ((uint)in[2]) << 16;
        Q_FALLTHROUGH();
    case 2:
        b |= ((uint)in[1]) << 8;
        Q_FALLTHROUGH();
    case 1:
        b |= ((uint)in[0]);
        break;
    case 0:
        break;
    }

    v3 ^= b;

    for (i = 0; i < cROUNDS; ++i)
        SIPROUND;

    v0 ^= b;

    v2 ^= 0xff;

    for (i = 0; i < dROUNDS; ++i)
        SIPROUND;

    b = v0 ^ v1 ^ v2 ^ v3;
    return b;
}
#undef SIPROUND
#undef ROTL

// Use SipHash-1-2, which has performance characteristics similar to those of
// Qt 4's hash implementation, instead of the SipHash-2-4 default.
template <int cROUNDS = 1, int dROUNDS = 2>
using SipHash = std::conditional_t<sizeof(void *) == 8,
                                   SipHash64<cROUNDS, dROUNDS>, SipHash32<cROUNDS, dROUNDS>>;
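// On a 64-bit platform SipHash<> therefore resolves to SipHash64<1, 2>, on a
// 32-bit platform to SipHash32<1, 2>.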

} // unnamed namespace

Q_NEVER_INLINE DECL_HOT_FUNCTION
static size_t siphash(const uint8_t *in, size_t inlen, size_t seed, size_t seed2)
{
    constexpr size_t TailSizeMask = sizeof(void *) - 1;
    SipHash<> hasher(inlen, seed, seed2);
    hasher.addBlock(in, inlen & ~TailSizeMask);
    return hasher.finalize(in + (inlen & ~TailSizeMask), inlen & TailSizeMask);
}
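
// Example of how a caller might hash an arbitrary byte buffer with this helper
// (an illustrative sketch; buf, len, seed and seed2 are placeholders):
//
//     size_t h = siphash(reinterpret_cast<const uint8_t *>(buf), len, seed, seed2);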