// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
 * an rcu grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed and delete them. This is a two
 * step process: (a) dereference the bpf_dtab_netdev object in netdev_map and
 * (b) check whether the ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion paths differ.
 */
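
/* Usage sketch (not part of this file; names are illustrative and assume
 * libbpf-style BTF map definitions): a minimal XDP program that looks up
 * slot 0 of a devmap and redirects the packet there.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 *
 * On success bpf_redirect_map() returns XDP_REDIRECT and the frame ends up on
 * this CPU's bulk queue for the target device; the driver's xdp_do_flush() at
 * the end of its NAPI poll sends it out via ndo_xdp_xmit().
 */
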
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

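/* Per-CPU, per-device bulk queue: frames redirected to the same target device
 * are batched here and handed to the driver in one ndo_xdp_xmit() call when
 * the queue fills up or when the driver flushes at the end of its NAPI poll.
 */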
struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

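/* Allocate and initialise the bucket array for a DEVMAP_HASH map. */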
static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

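/* Validate the map attributes and allocate the backing storage: a flat
 * netdev_map array for DEVMAP, or a power-of-two bucket array for
 * DEVMAP_HASH.
 */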
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		/* hash table size must be power of 2; roundup_pow_of_two() can
		 * overflow into UB on 32-bit arches, so check that first
		 */
		if (dtab->map.max_entries > 1UL << 31)
			return -EINVAL;

		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

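/* Allocate and initialise a devmap, and register it on dev_map_list so the
 * netdev notifier below can find it when devices go away.
 */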
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		bpf_map_area_free(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) have
	 * been disconnected from events. The following synchronize_rcu()
	 * guarantees that both rcu read critical sections complete and waits
	 * for preempt-disable regions (NAPI being the relevant context here),
	 * so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	bpf_map_area_free(dtab);
}

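/* Iterate keys in array order: an out-of-range key restarts the walk at 0,
 * and the last valid index returns -ENOENT to terminate it.
 */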
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

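/* Run the devmap entry's XDP program over a batch of frames that are about to
 * be transmitted. Frames returning XDP_PASS are compacted to the front of the
 * array; everything else is dropped and freed. Returns the number of frames
 * left to send.
 */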
static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}

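/* Drain one bulk queue: optionally filter the batch through the entry's XDP
 * program, hand the survivors to the driver via ndo_xdp_xmit() and free any
 * frames the driver did not accept.
 */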
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}

#ifdef CONFIG_DEBUG_NET
bool dev_check_flush(void)
{
	if (list_empty(this_cpu_ptr(&dev_flush_list)))
		return false;
	__dev_flush();
	return true;
}
#endif

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu variable
 * access is safe and map elements stay alive. See the comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk
	 * queue, because the bq is stored per-CPU and must be flushed from
	 * the net_device driver's NAPI handler before it returns.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

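/* Common enqueue path for XDP frame redirect: check that the target device can
 * transmit this frame via ndo_xdp_xmit() (including multi-buffer support),
 * then queue it on the per-CPU bulk queue.
 */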
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return -EOPNOTSUPP;

	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

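/* Generic XDP (skb) counterpart of dev_map_bpf_prog_run(): run the devmap
 * entry's program on an skb before it is transmitted. Returns the XDP verdict;
 * the skb is freed here on anything other than XDP_PASS.
 */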
static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

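/* Check whether a devmap entry is a usable broadcast destination for this
 * frame: the entry must exist and its device must be able to transmit the
 * frame via ndo_xdp_xmit().
 */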
static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj)
		return false;

	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return false;

	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

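/* Broadcast an XDP frame to every valid device in the map, optionally
 * excluding the ingress device and its uppers. All destinations but the last
 * get a clone of the frame; the original is consumed by the final enqueue, or
 * freed if no destination was found.
 */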
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we just
	 * return 0 even if packet is dropped. Helper below takes care of
	 * freeing skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

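/* skb (generic XDP) counterpart of dev_map_enqueue_multi(): clone the skb to
 * every valid destination in the map except the last, which consumes the
 * original; free the skb if the map yields no destination.
 */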
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (!dst)
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

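/* RCU callback: drop the entry's program and device references once all
 * readers of the old map slot are gone.
 */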
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static long dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev) {
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		atomic_dec((atomic_t *)&dtab->items);
	}
	return 0;
}

static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

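/* Allocate a new devmap entry: take a reference on the target net_device and,
 * if a program fd was supplied, on the XDP program that will run on
 * transmission through this entry.
 */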
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_NOWAIT | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
		    !bpf_prog_map_compatible(&dtab->map, prog))
			goto err_put_prog;
	}

	dev->idx = idx;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

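/* Update one slot of a DEVMAP array. A zero ifindex clears the slot; otherwise
 * a new entry is allocated and swapped in with xchg(), and the old entry (if
 * any) is freed after an RCU grace period.
 */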
static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
				  void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	else
		atomic_inc((atomic_t *)&dtab->items);

	return 0;
}

static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				       void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

static u64 dev_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u64 usage = sizeof(struct bpf_dtab);

	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
	else
		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
	usage += atomic_read((atomic_t *)&dtab->items) *
			 (u64)sizeof(struct bpf_dtab_netdev);
	return usage;
}

BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_map_redirect,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_hash_map_redirect,
};

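/* Remove every entry of a DEVMAP_HASH map that points at the given netdev;
 * called from the NETDEV_UNREGISTER notifier below.
 */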
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

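/* netdev notifier: allocate the per-CPU bulk queue when an XDP-capable device
 * registers, and purge references to a device from every devmap when it
 * unregisters.
 */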
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev) {
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
					atomic_dec((atomic_t *)&dtab->items);
				}
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);
