/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/bitops.h>	/* ror32() */
#include <linux/errno.h>
#include <linux/string.h>	/* memcpy() */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/checksum.h>
#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
#include <linux/uaccess.h>
#endif

#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
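/*
 * Generic fallback: copy @len bytes in from user space, then checksum
 * the kernel-side copy.  Seeding csum_partial() with ~0U guarantees a
 * successful sum is never 0, so 0 unambiguously signals a fault.
 */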
static __always_inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (copy_from_user(dst, src, len))
		return 0;
	return csum_partial(dst, len, ~0U);
}
#endif

#ifndef HAVE_CSUM_COPY_USER
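/*
 * Generic fallback: checksum the kernel buffer, then copy it out to
 * user space.  As above, the ~0U seed keeps a successful sum non-zero,
 * reserving 0 for the fault case.
 */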
static __always_inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	__wsum sum = csum_partial(src, len, ~0U);

	if (copy_to_user(dst, src, len) == 0)
		return sum;
	return 0;
}
#endif

#ifndef _HAVE_ARCH_CSUM_AND_COPY
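/*
 * Kernel-to-kernel copy-and-checksum: no faults are possible, hence
 * "nocheck" and the plain 0 seed.
 */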
static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, 0);
}
#endif

#ifndef HAVE_ARCH_CSUM_ADD
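/*
 * Ones' complement addition: if the 32-bit add wraps, @res ends up
 * smaller than @addend, and the comparison re-injects the carry at
 * bit 0 (end-around carry).
 */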
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;
	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif

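/*
 * In ones' complement arithmetic, ~addend is the additive inverse of
 * addend, so subtraction reduces to adding the complement.
 */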
static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}

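/*
 * 16-bit counterparts of csum_add()/csum_sub(), for folded checksums
 * that are manipulated directly in packet headers.
 */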
static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}

#ifndef HAVE_ARCH_CSUM_SHIFT
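/*
 * A ones' complement sum is built from 16-bit words, so a block that
 * lands at an odd byte offset contributes its bytes swapped; rotating
 * the block's sum by 8 bits compensates for that.
 */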
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		return (__force __wsum)ror32((__force u32)sum, 8);
	return sum;
}
#endif

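/*
 * Add the checksum of a sub-block that starts @offset bytes into the
 * buffer to a running sum, byte-rotating it first if @offset is odd.
 */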
static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	return csum_add(csum, csum_shift(csum2, offset));
}

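/*
 * Same as csum_block_add(); the extra @len argument is unused and
 * appears to exist only so the helper matches a wider callback
 * signature.
 */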
static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}

static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}

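/*
 * Widen a folded 16-bit checksum back to a 32-bit partial sum; the
 * zero-extended value is equivalent under ones' complement addition.
 */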
static __always_inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}

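/*
 * Wrapper around csum_partial(), presumably kept for signature parity
 * with the *_ext helpers above.
 */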
static __always_inline
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}

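/*
 * 0xffff is the ones' complement representation of -0.  Protocols like
 * UDP reserve an all-zero checksum field to mean "no checksum", so a
 * checksum that computes to zero is transmitted as CSUM_MANGLED_0.
 */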
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

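/* Fold a precomputed 32-bit difference into a stored 16-bit checksum. */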
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}

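/*
 * Incrementally update a folded checksum when one 32-bit word of the
 * checksummed data changes from @from to @to.
 */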
static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}

/* Implements RFC 1624 (Incremental Internet Checksum)
 * 3. Discussion states:
 *	HC' = ~(~HC + ~m + m')
 *	m  : old value of a 16-bit field
 *	m' : new value of a 16-bit field
 */
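/* csum16_sub() contributes the ~m term, csum16_add() the m' term. */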
static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}

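/* The same incremental update, applied to an unfolded 32-bit sum. */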
static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

struct sk_buff;
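/*
 * The inet_proto_csum_replace*() helpers update a transport-layer
 * checksum field in place and keep skb->csum consistent; @pseudohdr
 * says whether the modified data is also covered by the protocol's
 * pseudo-header checksum.
 */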
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);

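/*
 * 16-bit variant, implemented via the 32-bit helper: zero-extending
 * both @from and @to by the same amount leaves the checksum difference
 * intact.
 */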
static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
			      __be16 from, __be16 to, bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}

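/*
 * Remote checksum offload: remove the contribution of the bytes before
 * @start from @csum, write the folded remainder into the 16-bit field
 * at @offset, and return the delta the caller must apply to skb->csum.
 */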
static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
					     int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}

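/* Reverse a remcsum_adjust() using the delta it returned. */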
static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}

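/*
 * Negate a partial sum as a plain 32-bit integer; keeps the __wsum
 * type without open-coded __force casts at every call site.
 */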
static __always_inline __wsum wsum_negate(__wsum val)
{
	return (__force __wsum)-((__force u32)val);
}
#endif /* _CHECKSUM_H */