// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
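/*
 * For context, a typical way to exercise these actions from user space
 * (illustrative iproute2 invocations; device names are placeholders):
 *
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress mirror dev eth1
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress redirect dev eth1
 */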

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

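/* Limit how deeply mirred actions may nest on a CPU (e.g. a target
 * device whose own tc rules run mirred again) before the packet is
 * dropped; the depth is tracked per CPU in mirred_nest_level.
 */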
#define MIRRED_NEST_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

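/* A redirected skb may be reinserted without cloning only when the
 * verdict guarantees the caller gives up ownership of the original.
 */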
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
};

static struct tc_action_ops act_mirred_ops;

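/* Swap the target device under tcf_lock and drop the tracked reference
 * to the previous device, if any.
 */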
static void tcf_mirred_replace_dev(struct tcf_mirred *m,
				   struct net_device *ndev)
{
	struct net_device *odev;

	odev = rcu_replace_pointer(m->tcfm_dev, ndev,
				   lockdep_is_held(&m->tcf_lock));
	netdev_put(odev, &m->tcfm_dev_tracker);
}

static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot specify Block ID and dev simultaneously");
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);

		return -EINVAL;
	}

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack,
					   "Must specify device or block");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		tcf_mirred_replace_dev(m, ndev);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
		m->tcfm_blockid = 0;
	} else if (tb[TCA_MIRRED_BLOCKID]) {
		tcf_mirred_replace_dev(m, NULL);
		m->tcfm_mac_header_xmit = false;
		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

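/* Deliver the skb on the selected datapath: device egress via
 * dev_queue_xmit(), or ingress via netif_rx() when coming from egress,
 * or netif_receive_skb() when already on the ingress path.
 */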
static int
tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else if (!at_ingress)
		err = netif_rx(skb);
	else
		err = netif_receive_skb(skb);

	return err;
}

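/* Mirror or redirect one skb to @dev: clone unless the original can be
 * reinserted directly, align skb->data with what the target expects
 * (mac vs network header), then hand it to tcf_mirred_forward().
 */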
static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
			     struct net_device *dev,
			     const bool m_mac_header_xmit, int m_eaction,
			     int retval)
{
	struct sk_buff *skb_to_send = skb;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	bool dont_clone;
	int mac_len;
	bool at_nh;
	int err;

	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto err_cant_do;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	at_ingress = skb_at_tc_ingress(skb);
	dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
		tcf_mirred_can_reinsert(retval);
	if (!dont_clone) {
		skb_to_send = skb_clone(skb, GFP_ATOMIC);
		if (!skb_to_send)
			goto err_cant_do;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb_to_send);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb_to_send);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = at_ingress ? skb->mac_len :
			  skb_network_offset(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb_to_send, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb_to_send, mac_len);
		}
	}

	skb_to_send->skb_iif = skb->dev->ifindex;
	skb_to_send->dev = dev;

	if (is_redirect) {
		if (skb == skb_to_send)
			retval = TC_ACT_CONSUMED;

		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);

		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	} else {
		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	}
	if (err)
		tcf_action_inc_overlimit_qstats(&m->common);

	return retval;

err_cant_do:
	if (is_redirect)
		retval = TC_ACT_SHOT;
	tcf_action_inc_overlimit_qstats(&m->common);
	return retval;
}

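/* Redirect to a tc block: clone (mirror) to every member port except the
 * last one walked and the exception port, then redirect the skb itself
 * to that last port so it is consumed exactly once.
 */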
static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
			       struct tcf_block *block, int m_eaction,
			       const u32 exception_ifindex, int retval)
{
	struct net_device *dev_prev = NULL;
	struct net_device *dev = NULL;
	unsigned long index;
	int mirred_eaction;

	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		if (!dev_prev)
			goto assign_prev;

		tcf_mirred_to_dev(skb, m, dev_prev,
				  dev_is_mac_header_xmit(dev),
				  mirred_eaction, retval);
assign_prev:
		dev_prev = dev;
	}

	if (dev_prev)
		return tcf_mirred_to_dev(skb, m, dev_prev,
					 dev_is_mac_header_xmit(dev_prev),
					 m_eaction, retval);

	return retval;
}

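/* Mirror to a tc block: clone the skb to every member port except the
 * exception port.
 */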
static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
				struct tcf_block *block, int m_eaction,
				const u32 exception_ifindex, int retval)
{
	struct net_device *dev = NULL;
	unsigned long index;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		tcf_mirred_to_dev(skb, m, dev,
				  dev_is_mac_header_xmit(dev),
				  m_eaction, retval);
	}

	return retval;
}

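/* Fan a packet out to all ports of the tc block identified by @blockid,
 * excluding the port the skb currently sits on (skb->dev).
 */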
static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
			 const u32 blockid, struct tcf_result *res,
			 int retval)
{
	const u32 exception_ifindex = skb->dev->ifindex;
	struct tcf_block *block;
	bool is_redirect;
	int m_eaction;

	m_eaction = READ_ONCE(m->tcfm_eaction);
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);

	/* we are already under rcu protection, so can call block lookup
	 * directly.
	 */
	block = tcf_block_lookup(dev_net(skb->dev), blockid);
	if (!block || xa_empty(&block->ports)) {
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}

	if (is_redirect)
		return tcf_blockcast_redir(skb, m, block, m_eaction,
					   exception_ifindex, retval);

	/* If it's not redirect, it is mirror */
	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
				    retval);
}

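/* Action entry point: bump the per-cpu nest counter to break mirred
 * loops, then dispatch to the block broadcast path or to the single
 * configured target device.
 */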
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	int retval = READ_ONCE(m->tcf_action);
	unsigned int nest_level;
	bool m_mac_header_xmit;
	struct net_device *dev;
	int m_eaction;
	u32 blockid;

	nest_level = __this_cpu_inc_return(mirred_nest_level);
	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		retval = TC_ACT_SHOT;
		goto dec_nest_level;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	blockid = READ_ONCE(m->tcfm_blockid);
	if (blockid) {
		retval = tcf_blockcast(skb, m, blockid, res, retval);
		goto dec_nest_level;
	}

	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		tcf_action_inc_overlimit_qstats(&m->common);
		goto dec_nest_level;
	}

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);

	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
				   retval);

dec_nest_level:
	__this_cpu_dec(mirred_nest_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;
	u32 blockid;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	blockid = m->tcfm_blockid;
	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

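/* On NETDEV_UNREGISTER, walk mirred_list and detach the departing device
 * from any action that targets it.
 */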
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note : no rcu grace period necessary, as
				 * net_device are already rcu protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

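/* Presumably used by the hardware-offload path to obtain the target
 * device; the reference taken here is dropped via the destructor.
 */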
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

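/* Translate the four mirred variants into flow_action ids for hardware
 * offload; anything else is rejected with -EOPNOTSUPP.
 */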
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		= "mirred",
	.id		= TCA_ID_MIRRED,
	.owner		= THIS_MODULE,
	.act		= tcf_mirred_act,
	.stats_update	= tcf_stats_update,
	.dump		= tcf_mirred_dump,
	.cleanup	= tcf_mirred_release,
	.init		= tcf_mirred_init,
	.get_fill_size	= tcf_mirred_get_fill_size,
	.offload_act_setup = tcf_mirred_offload_act_setup,
	.size		= sizeof(struct tcf_mirred),
	.get_dev	= tcf_mirred_get_dev,
};
MODULE_ALIAS_NET_ACT("mirred");

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);