// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
 */
#include "sja1105.h"
#include "sja1105_vl.h"

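/* Look up a previously offloaded flower rule by the tc cookie that was
 * assigned to it when the filter was installed. Returns NULL if no rule
 * with that cookie is currently offloaded on this switch.
 */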
struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
				       unsigned long cookie)
{
	struct sja1105_rule *rule;

	list_for_each_entry(rule, &priv->flow_block.rules, list)
		if (rule->cookie == cookie)
			return rule;

	return NULL;
}

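/* Find the first L2 policer that no offloaded rule has claimed yet, or
 * return -1 if the policer pool is exhausted. Indices 0..num_ports - 1 are
 * reserved at setup time for the default per-port policers, so only the
 * remaining entries can be handed out here.
 */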
static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!priv->flow_block.l2_policer_used[i])
			return i;

	return -1;
}

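/* Offload a tc-police action keyed on the broadcast DMAC as a dedicated
 * broadcast policer for this port. An illustrative command (interface name
 * hypothetical; exact syntax per tc-flower(8) and tc-police(8)) would look
 * roughly like:
 *
 *	tc qdisc add dev swp2 clsact
 *	tc filter add dev swp2 ingress flower skip_sw \
 *		dst_mac ff:ff:ff:ff:ff:ff \
 *		action police rate 10mbit burst 64k
 */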
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	struct dsa_switch *ds = priv->ds;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

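	/* The L2 Policing table holds ds->num_ports * SJA1105_NUM_TC
	 * per-port, per-traffic-class entries followed by one broadcast
	 * policer entry per port. A broadcast entry whose sharindx still
	 * equals the port number is pointing at its default shared policer,
	 * i.e. no broadcast policer has been installed on that port yet.
	 */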
	if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

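	/* The policer RATE field is programmed in hardware-specific units;
	 * judging by the conversion below, one unit corresponds to
	 * 15.625 kbps (64 units per Mbps), so the rate in bytes per second
	 * is scaled by 8 / 15625, which simplifies to 512 / 1000000.
	 */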
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

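/* Offload a tc-police action keyed on VLAN PCP only as a per-traffic-class
 * policer. An illustrative command (interface name hypothetical; exact
 * syntax per tc-flower(8)) would look roughly like:
 *
 *	tc filter add dev swp2 ingress protocol 802.1Q flower skip_sw \
 *		vlan_prio 7 action police rate 10mbit burst 64k
 */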
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

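/* Dispatch a tc-police action to the appropriate policer type based on the
 * parsed key: broadcast DMAC matches become broadcast policers, PCP-only
 * matches become per-traffic-class policers; anything else cannot be
 * offloaded as a policer.
 */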
static int sja1105_flower_policer(struct sja1105_private *priv, int port,
				  struct netlink_ext_ack *extack,
				  unsigned long cookie,
				  struct sja1105_key *key,
				  u64 rate_bytes_per_sec,
				  u32 burst)
{
	switch (key->type) {
	case SJA1105_KEY_BCAST:
		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
						   rate_bytes_per_sec, burst);
	case SJA1105_KEY_TC:
		return sja1105_setup_tc_policer(priv, extack, cookie, port,
						key->tc.pcp, rate_bytes_per_sec,
						burst);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
		return -EOPNOTSUPP;
	}
}

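/* Translate the flower match into one of the keys the switch can act on:
 * broadcast DMAC, VLAN PCP only, or a {DMAC, VID, PCP} / DMAC-only tuple
 * usable as a virtual link rule. Only exact (unmasked) matches on DMAC,
 * VID and PCP are accepted.
 */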
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

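	/* Classify based on which fields were actually matched: a broadcast
	 * DMAC alone selects the broadcast key, PCP alone selects the
	 * traffic-class key, DMAC + VID + PCP selects a VLAN-aware virtual
	 * link, and any other DMAC match falls back to a VLAN-unaware
	 * virtual link.
	 */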
	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		key->tc.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}

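/* Sanity-check a tc-police action against what the hardware policers can
 * do: the exceed action must be drop, the conform action must be pipe or
 * ok (and ok only as the last action), and peakrate, avrate, overhead and
 * packet-per-second policing are not supported.
 */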
static int sja1105_policer_validate(const struct flow_action *action,
				    const struct flow_action_entry *act,
				    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

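/* Offload a flower classifier and its actions. Policing actions map onto
 * the L2 policers, while trap, redirect, drop and gate actions are
 * expressed as virtual link (VL) rules. A gate action can only be
 * offloaded together with a trap or redirect, since the VL needs a
 * destination port set before scheduling can be configured.
 */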
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_policer_validate(&rule->action, act, extack);
			if (rc)
				goto out;

			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->hw_index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}

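/* Undo the offload for one port. Policer rules restore the default shared
 * index (the port's own policer) and release the dedicated policer once no
 * port references the rule anymore; virtual link rules are torn down by
 * the VL code.
 */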
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

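/* Report statistics for an offloaded rule. Only virtual link rules have
 * hardware counters; policer rules report nothing.
 */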
int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
			     struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	int rc;

	if (!rule)
		return 0;

	if (rule->type != SJA1105_RULE_VL)
		return 0;

	rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
			      cls->common.extack);
	if (rc)
		return rc;

	return 0;
}

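/* Initialize the flow block state. The first ds->num_ports shared indices
 * are marked as used because they are reserved for the default per-port
 * policers; only the remaining policers can be claimed by flower rules.
 */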
void sja1105_flower_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	int port;

	INIT_LIST_HEAD(&priv->flow_block.rules);

	for (port = 0; port < ds->num_ports; port++)
		priv->flow_block.l2_policer_used[port] = true;
}

void sja1105_flower_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &priv->flow_block.rules) {
		rule = list_entry(pos, struct sja1105_rule, list);
		list_del(&rule->list);
		kfree(rule);
	}
}