// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &packet_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all CPUs
 * that are in the middle of receiving packets will see the new offload
 * handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
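
/* Illustrative sketch (not part of this file): a protocol typically
 * registers its offload handlers once at init time with a statically
 * defined struct packet_offload, roughly as net/ipv4/af_inet.c does
 * for ETH_P_IP:
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */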

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &packet_offload
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &packet_offload is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);


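/* Attempt to coalesce @skb into the already-held GRO packet @p. Depending
 * on how the new segment is laid out, its page fragments are appended to
 * @p, its linear head is turned into a page fragment, or the whole skb is
 * linked onto @p's frag_list. Returns 0 on success, or a negative errno
 * when the two packets cannot (or should not) be merged.
 */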
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

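	/* All payload past the GRO offset lives in page fragments: move the
	 * fragments over to the tail skb and drop the donor's linear head.
	 */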
	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

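	/* Fall back to chaining: either @p has no room left for more page
	 * fragments, or part of the payload sits in a linear head that
	 * cannot be turned into a fragment. Link the whole skb onto @p's
	 * frag_list instead.
	 */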
merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}


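/* A held GRO packet is leaving the GRO layer: give the matching protocol a
 * chance to finalize its headers (gro_complete), then hand the skb to the
 * regular receive path via gro_normal_one().
 */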
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

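/* Walk the per-bucket list of held packets and mark each one whose headers
 * match the freshly received @skb as a same_flow candidate for the
 * protocol's gro_receive callback.
 */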
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on some slower paths, so
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

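/* Prime the "frag0" fast path: if the skb carries no linear data, let GRO
 * read the headers straight out of the first page fragment instead of
 * forcing a pull into the linear area for every protocol layer.
 */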
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

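/* Core of the GRO engine: find candidates for @skb in its hash bucket, let
 * the matching protocol try to merge it, and decide whether the result is
 * merged, held on the GRO list, consumed, or passed up as a normal packet.
 */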
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
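
/* Illustrative sketch (not part of this file): encapsulation GRO handlers
 * use these lookups to chain into the inner protocol's offload callbacks.
 * An Ethernet-style gro_receive might do roughly:
 *
 *	__be16 type = eh->h_proto;
 *	struct packet_offload *ptype = gro_find_receive_by_type(type);
 *
 *	if (!ptype) {
 *		flush = 1;
 *		goto out;
 *	}
 *	pp = ptype->callbacks.gro_receive(head, skb);
 */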

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

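/* Driver-facing GRO entry point for fully built skbs: feed @skb into the
 * GRO engine and dispose of it according to the result.
 */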
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
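
/* Illustrative sketch (not part of this file): a driver using the frags API
 * typically asks the stack for a shell skb, attaches the received page
 * fragments (Ethernet header included), then lets GRO take over. The
 * variable names below are hypothetical:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb))
 *		return;		// out of memory, drop this frame
 *	skb_add_rx_frag(skb, 0, page, offset, frame_len, truesize);
 *	napi_gro_frags(napi);
 */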

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
