/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following disclaimer
 *     in the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation. This program is dual-licensed; you may select
 * either version 2 of the GNU General Public License ("GPL") or BSD license
 * ("BSD").
 *
 * You can contact the author at:
 * - xxHash homepage: https://cyan4973.github.io/xxHash/
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

#ifdef __XEN__
#include <xen/compiler.h>
#include <xen/errno.h>
#include <xen/string.h>
#include <xen/xxhash.h>
#include <xen/unaligned.h>
#endif

/*-*************************************
 * Macros
 **************************************/
/* Rotate the 64-bit value x left by r bits (r must be 1..63). */
#define xxh_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))

#ifdef __LITTLE_ENDIAN
# define XXH_CPU_LITTLE_ENDIAN 1
#else
# define XXH_CPU_LITTLE_ENDIAN 0
#endif

/*-*************************************
 * Constants
 **************************************/
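/* The five 64-bit primes from the reference xxHash implementation. */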
static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 = 1609587929392839161ULL;
static const uint64_t PRIME64_4 = 9650029242287828579ULL;
static const uint64_t PRIME64_5 = 2870177450012600261ULL;

/*-**************************
 * Utils
 ***************************/
void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
{
	memcpy(dst, src, sizeof(*dst));
}

/*-***************************
 * Simple Hash Functions
 ****************************/
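/*
 * One lane step: absorb a 64-bit little-endian word into an accumulator
 * (add input * PRIME64_2, rotate left by 31, multiply by PRIME64_1).
 */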
static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
{
	acc += input * PRIME64_2;
	acc = xxh_rotl64(acc, 31);
	acc *= PRIME64_1;
	return acc;
}

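/* Fold a finished lane accumulator into the merged hash value. */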
static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
{
	val = xxh64_round(0, val);
	acc ^= val;
	acc = acc * PRIME64_1 + PRIME64_4;
	return acc;
}

uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;
	uint64_t h64;

	if (len >= 32) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
		uint64_t v2 = seed + PRIME64_2;
		uint64_t v3 = seed + 0;
		uint64_t v4 = seed - PRIME64_1;

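		/* Bulk phase: four independent lanes, 32 input bytes per iteration. */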
		do {
			v1 = xxh64_round(v1, get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);

	} else {
		h64 = seed + PRIME64_5;
	}

	h64 += (uint64_t)len;

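	/* Tail: fold in the remaining bytes 8, 4, then 1 at a time. */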
	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

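	/* Final avalanche: xor-shift and multiply to disperse the last bits. */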
	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}
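
/*
 * Illustrative one-shot usage (a sketch, not part of the original file;
 * "buf" and "buf_len" are hypothetical):
 *
 *	uint64_t h = xxh64(buf, buf_len, 0);
 *
 * For input arriving in chunks, use the streaming interface below instead:
 * xxh64_reset(), then xxh64_update() per chunk, then xxh64_digest().
 */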

/*-**************************************************
 * Advanced Hash Functions
 ***************************************************/
void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
{
	/* use a local state for memcpy() to avoid strict-aliasing warnings */
	struct xxh64_state state;

	memset(&state, 0, sizeof(state));
	state.v1 = seed + PRIME64_1 + PRIME64_2;
	state.v2 = seed + PRIME64_2;
	state.v3 = seed + 0;
	state.v4 = seed - PRIME64_1;
	memcpy(statePtr, &state, sizeof(state));
}

int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -EINVAL;

	state->total_len += len;

	if (state->memsize + len < 32) { /* fill in tmp buffer */
		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* tmp buffer is full */
		uint64_t *p64 = state->mem64;

		memcpy(((uint8_t *)p64) + state->memsize, input,
			32 - state->memsize);

		state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
		p64++;
		state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
		p64++;
		state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
		p64++;
		state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));

		p += 32 - state->memsize;
		state->memsize = 0;
	}

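	/* Bulk phase: hash whole 32-byte stripes straight from the caller's buffer. */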
	if (p + 32 <= b_end) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = state->v1;
		uint64_t v2 = state->v2;
		uint64_t v3 = state->v3;
		uint64_t v4 = state->v4;

		do {
			v1 = xxh64_round(v1, get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		memcpy(state->mem64, p, (size_t)(b_end - p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}

uint64_t xxh64_digest(const struct xxh64_state *state)
{
	const uint8_t *p = (const uint8_t *)state->mem64;
	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
		state->memsize;
	uint64_t h64;

	if (state->total_len >= 32) {
		const uint64_t v1 = state->v1;
		const uint64_t v2 = state->v2;
		const uint64_t v3 = state->v3;
		const uint64_t v4 = state->v4;

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);
	} else {
		h64 = state->v3 /* == seed */ + PRIME64_5;
	}

	h64 += (uint64_t)state->total_len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}
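
/*
 * Illustrative streaming usage (a sketch, not part of the original file;
 * "chunk1"/"chunk2" and their lengths are hypothetical):
 *
 *	struct xxh64_state st;
 *	uint64_t h;
 *
 *	xxh64_reset(&st, 0);
 *	if (xxh64_update(&st, chunk1, len1) == 0 &&
 *	    xxh64_update(&st, chunk2, len2) == 0)
 *		h = xxh64_digest(&st);
 *
 * The result matches xxh64() applied to the concatenated input with the
 * same seed.
 */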