/*
 * Common code for checksum implementations
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#ifndef CHKSUM_COMMON_H
#define CHKSUM_COMMON_H

#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
#error Only little endian supported
#endif

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Assertions must be explicitly enabled */
#if WANT_ASSERT
#undef NDEBUG
#include <assert.h>
#define Assert(exp) assert(exp)
#else
/* Still evaluate the expression when assertions are disabled, so side
   effects are preserved and unused-variable warnings are avoided */
#define Assert(exp) (void) (exp)
#endif

#ifdef __GNUC__
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define may_alias __attribute__((__may_alias__))
#define always_inline __attribute__((always_inline))
#ifdef __clang__
/* Clang does not support the "optimize" function attribute */
#define no_unroll_loops
#else
#define no_unroll_loops __attribute__((optimize("no-unroll-loops")))
#endif
#define bswap16(x) __builtin_bswap16((x))
#else
#define likely(x) (x)
#define unlikely(x) (x)
#define may_alias
#define always_inline
#define no_unroll_loops
#define bswap16(x) ((uint8_t)((x) >> 8) | ((uint8_t)(x) << 8))
#endif

#define ALL_ONES ~UINT64_C(0)

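/*
 * Unaligned load helpers: going through memcpy avoids strict-aliasing
 * violations and misaligned dereferences; compilers lower each call to
 * a single load instruction.
 */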
static inline
uint64_t load64(const void *ptr)
{
    /* GCC will optimise this to a normal load instruction */
    uint64_t v;
    memcpy(&v, ptr, sizeof v);
    return v;
}

static inline
uint32_t load32(const void *ptr)
{
    /* GCC will optimise this to a normal load instruction */
    uint32_t v;
    memcpy(&v, ptr, sizeof v);
    return v;
}

static inline
uint16_t load16(const void *ptr)
{
    /* GCC will optimise this to a normal load instruction */
    uint16_t v;
    memcpy(&v, ptr, sizeof v);
    return v;
}

/* slurp_small() is for small buffers, don't waste cycles on alignment */
no_unroll_loops
always_inline
static inline uint64_t
slurp_small(const void *ptr, uint32_t nbytes)
{
    const unsigned char *cptr = ptr;
    uint64_t sum = 0;
    while (nbytes >= 4)
    {
        sum += load32(cptr);
        cptr += 4;
        nbytes -= 4;
    }
    if (nbytes & 2)
    {
        sum += load16(cptr);
        cptr += 2;
    }
    if (nbytes & 1)
    {
        sum += (uint8_t) *cptr;
    }
    return sum;
}

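/* Round ptr down to a multiple of bytes, which must be a power of two */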
static inline const void *
align_ptr(const void *ptr, size_t bytes)
{
    return (void *) ((uintptr_t) ptr & -(uintptr_t) bytes);
}

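/*
 * Fold a 64-bit partial sum to a 16-bit ones'-complement sum.
 * "swap" requests a final byte swap, needed when a caller using aligned
 * loads started from an odd base pointer, leaving the bytes in swapped
 * lanes.
 */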
always_inline
static inline uint16_t
fold_and_swap(uint64_t sum, bool swap)
{
    /* Fold 64-bit sum to 32 bits */
    sum = (sum & 0xffffffff) + (sum >> 32);
    sum = (sum & 0xffffffff) + (sum >> 32);
    Assert(sum == (uint32_t) sum);

    /* Fold 32-bit sum to 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    Assert(sum == (uint16_t) sum);

    if (unlikely(swap)) /* Odd base pointer is unexpected */
    {
        sum = bswap16(sum);
    }

    return (uint16_t) sum;
}
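
/*
 * Illustrative sketch (not defined in this header) of how these helpers
 * combine for a short buffer; a full implementation would typically add
 * an aligned wide-load loop for large buffers and pass swap = true when
 * that loop starts from an odd base pointer:
 *
 *   static uint16_t small_checksum(const void *ptr, uint32_t nbytes)
 *   {
 *       // slurp_small() keeps the buffer's own 16-bit word boundaries,
 *       // so no final byte swap is needed here.
 *       return fold_and_swap(slurp_small(ptr, nbytes), false);
 *   }
 */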

#endif