/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
#define NET_CLS_ALIAS_PREFIX "net-cls-"
#define MODULE_ALIAS_NET_CLS(kind)	MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)
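
/* A classifier module pairs register_tcf_proto_ops()/unregister_tcf_proto_ops()
 * with a module alias built from its "kind" string, so the module can be
 * auto-loaded by name. A sketch (the "foo" kind and ops are illustrative):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *	MODULE_ALIAS_NET_CLS("foo");
 *
 * with register_tcf_proto_ops(&cls_foo_ops) called from module init and
 * unregister_tcf_proto_ops(&cls_foo_ops) from module exit.
 */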

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
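
/* Typical pairing in a classful qdisc (sketch; struct foo_sched_data and the
 * callback names are illustrative): tcf_block_get() from ->init() and
 * tcf_block_put() from ->destroy():
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
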
int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
		     int police, struct tcf_proto *tp, u32 handle,
		     bool used_action_miss);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
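
/* tc_cls_stats_dump() is meant to be called from a classifier's ->walk()
 * callback for each installed filter. A minimal sketch (the filter list and
 * struct names are illustrative, not part of this header):
 *
 *	static void foo_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *			     bool rtnl_held)
 *	{
 *		struct foo_head *head = rtnl_dereference(tp->root);
 *		struct foo_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, list) {
 *			if (!tc_cls_stats_dump(tp, arg, f))
 *				break;
 *		}
 *	}
 */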

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}
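
/* Classifiers typically wrap tc_cls_bind_class() in their ->bind_class()
 * callback; a sketch (struct foo_filter is illustrative):
 *
 *	static void foo_bind_class(void *fh, u32 classid, unsigned long cl,
 *				   void *q, unsigned long base)
 *	{
 *		struct foo_filter *f = fh;
 *
 *		tc_cls_bind_class(classid, cl, q, &f->res, base);
 *	}
 */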

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker	ns_tracker;
	struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS
	return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
#else
	return -EOPNOTSUPP;
#endif
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}
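
/* The usual pattern in classifiers (sketch; foo_destroy_filter_work() and
 * __foo_destroy_filter() are illustrative): when tcf_exts_get_net() succeeds,
 * filter teardown may be deferred via tcf_queue_work(), and the work function
 * drops the reference with tcf_exts_put_net(); otherwise the netns is going
 * away and teardown must happen synchronously:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_destroy_filter_work);
 *	else
 *		__foo_destroy_filter(f);
 */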

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
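
/* Example iteration over the actions attached to a filter (sketch; what is
 * done with each action is up to the caller, e.g. inspecting act->ops->kind
 * or its counters):
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts) {
 *		...
 *	}
 *
 * The loop body never runs when CONFIG_NET_CLS_ACT is disabled.
 */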

static inline bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 struct flow_stats *stats,
			 bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		if (use_act_stats || tc_act_in_hw(a)) {
			if (!tcf_action_update_hw_stats(a))
				continue;
		}

		preempt_disable();
		tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
					stats->lastused, true);
		preempt_enable();

		a->used_hw_stats = stats->used_hw_stats;
		a->used_hw_stats_valid = stats->used_hw_stats_valid;
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
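
/* A classifier's ->classify() typically fills @res and then runs the attached
 * actions; a minimal sketch in the style of a match-everything classifier
 * (struct foo_head is illustrative):
 *
 *	static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *				struct tcf_result *res)
 *	{
 *		struct foo_head *head = rcu_dereference_bh(tp->root);
 *
 *		if (unlikely(!head))
 *			return -1;
 *		*res = head->res;
 *		return tcf_exts_exec(skb, &head->exts, res);
 *	}
 */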

static inline int
tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
		 struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions + act_index,
			       exts->nr_actions - act_index, res);
#else
	return TC_ACT_OK;
#endif
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
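
/* Classifiers that support ematches evaluate the tree before running the
 * filter's actions; a sketch in the style of the basic classifier (struct and
 * field names are illustrative, and @info may be NULL when no packet
 * information has been prepared):
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		int r;
 *
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;
 *		return r;
 *	}
 */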

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
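
/* The two helpers above are normally used together: resolve a base pointer
 * for the requested layer, add the match offset, then bounds-check before
 * reading. A sketch (off and len are illustrative):
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;
 *
 * After the check it is safe to read len bytes starting at ptr.
 */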

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
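
/* Drivers usually call this at the top of their classifier offload handlers;
 * a sketch of an ndo_setup_tc() u32 handler (function and helper names are
 * illustrative):
 *
 *	static int foo_setup_tc_cls_u32(struct net_device *dev,
 *					struct tc_cls_u32_offload *cls_u32)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &cls_u32->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (cls_u32->command) {
 *		case TC_CLSU32_NEW_KNODE:
 *		case TC_CLSU32_REPLACE_KNODE:
 *			return foo_add_knode(dev, cls_u32);
 *		case TC_CLSU32_DELETE_KNODE:
 *			return foo_del_knode(dev, cls_u32);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */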

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	bool use_act_stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u32 quantum;
	u64 rate;
	u64 ceil;
	u8 prio;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif

#endif /* __NET_PKT_CLS_H */