// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

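/* GRO receive handler for ESP: look up the xfrm state for the SPI,
 * attach it to the secpath and feed the packet to xfrm_input() for
 * decryption. Returning ERR_PTR(-EINPROGRESS) tells the GRO layer
 * that the skb has been consumed.
 */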
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

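/* Called from the xfrm output path to build the outer ESP header for a
 * GSO skb: fill in the SPI and the low 32 bits of the output sequence
 * number and remember the inner protocol for the segmentation step.
 */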
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

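/* Tunnel mode: the inner packet is a complete IPv4 or IPv6 packet, so
 * hand it to the generic ethernet GSO helper with the matching
 * ethertype.
 */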
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

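/* Transport mode: advance the transport header past the ESP header and
 * let the inner transport protocol's own gso_segment callback do the
 * segmentation.
 */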
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

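/* BEET mode: find the real inner protocol first. For IPv4 selectors a
 * pseudo header (IPPROTO_BEETPH) may have been inserted and must be
 * skipped; for IPv6 selectors any extension headers have to be walked.
 * Then delegate to the inner protocol's gso_segment callback.
 */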
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

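/* Dispatch segmentation on the outer encapsulation mode of the state. */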
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

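/* GSO callback for ESP: validate the packet against the state, strip
 * the ESP header and IV, and decide which offload features the
 * segments may keep. Checksum and scatter-gather offloads are masked
 * out unless the underlying device can do ESP hardware offload for
 * this state.
 */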
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

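/* Receive-side tail: after the device (or the GRO fallback) has
 * decrypted the packet, run the common post-decryption processing in
 * esp_input_done2().
 */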
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

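/* Transmit handler for ESP offload. If the device cannot handle this
 * state in hardware, fall back to software crypto: build padding and
 * trailer, fill in SPI and (extended) sequence number, fix up the
 * outer IP header and run the regular ESP output tail.
 */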
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

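/* Registration tables: the net_offload entry hooks ESP into the inet
 * GRO/GSO machinery, the xfrm_type_offload entry into the xfrm core.
 */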
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");