// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
 */
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
#include "sja1105_vl.h"

#define SJA1105_SIZE_VL_STATUS 8

/* Insert into the global gate list, sorted by gate action time. */
static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
                                     struct sja1105_rule *rule,
                                     u8 gate_state, s64 entry_time,
                                     struct netlink_ext_ack *extack)
{
        struct sja1105_gate_entry *e;
        int rc;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->rule = rule;
        e->gate_state = gate_state;
        e->interval = entry_time;

        if (list_empty(&gating_cfg->entries)) {
                list_add(&e->list, &gating_cfg->entries);
        } else {
                struct sja1105_gate_entry *p;

                list_for_each_entry(p, &gating_cfg->entries, list) {
                        if (p->interval == e->interval) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Gate conflict");
                                rc = -EBUSY;
                                goto err;
                        }

                        if (e->interval < p->interval)
                                break;
                }
                list_add(&e->list, p->list.prev);
        }

        gating_cfg->num_entries++;

        return 0;
err:
        kfree(e);
        return rc;
}

/* The gate entries contain absolute times in their e->interval field. Convert
 * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
 */
static void
sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
                                    u64 cycle_time)
{
        struct sja1105_gate_entry *last_e;
        struct sja1105_gate_entry *e;
        struct list_head *prev;

        list_for_each_entry(e, &gating_cfg->entries, list) {
                struct sja1105_gate_entry *p;

                prev = e->list.prev;

                if (prev == &gating_cfg->entries)
                        continue;

                p = list_entry(prev, struct sja1105_gate_entry, list);
                p->interval = e->interval - p->interval;
        }
        last_e = list_last_entry(&gating_cfg->entries,
                                 struct sja1105_gate_entry, list);
        last_e->interval = cycle_time - last_e->interval;
}

static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
{
        struct sja1105_gate_entry *e, *n;

        list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
                list_del(&e->list);
                kfree(e);
        }
}

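/* Merge the gate schedules of all time-triggered VL rules into one gating
 * subschedule. The composite schedule takes the base time and cycle time of
 * the rule with the longest cycle; rules with shorter cycles are repeated
 * until they fill that cycle. The TAS code (sja1105_init_scheduling()) later
 * merges this subschedule with any tc-taprio schedule into the Schedule
 * Table.
 */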
static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
                                              struct netlink_ext_ack *extack)
{
        struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
        struct sja1105_rule *rule;
        s64 max_cycle_time = 0;
        s64 its_base_time = 0;
        int i, rc = 0;

        sja1105_free_gating_config(gating_cfg);

        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                if (rule->type != SJA1105_RULE_VL)
                        continue;
                if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
                        continue;

                if (max_cycle_time < rule->vl.cycle_time) {
                        max_cycle_time = rule->vl.cycle_time;
                        its_base_time = rule->vl.base_time;
                }
        }

        if (!max_cycle_time)
                return 0;

        dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
                max_cycle_time, its_base_time);

        gating_cfg->base_time = its_base_time;
        gating_cfg->cycle_time = max_cycle_time;
        gating_cfg->num_entries = 0;

        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                s64 time;
                s64 rbt;

                if (rule->type != SJA1105_RULE_VL)
                        continue;
                if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
                        continue;

                /* Calculate the difference between this gating schedule's
                 * base time, and the base time of the gating schedule with the
                 * longest cycle time. We call it the relative base time (rbt).
                 */
                rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
                                       its_base_time);
                rbt -= its_base_time;

                time = rbt;

                for (i = 0; i < rule->vl.num_entries; i++) {
                        u8 gate_state = rule->vl.entries[i].gate_state;
                        s64 entry_time = time;

                        while (entry_time < max_cycle_time) {
                                rc = sja1105_insert_gate_entry(gating_cfg, rule,
                                                               gate_state,
                                                               entry_time,
                                                               extack);
                                if (rc)
                                        goto err;

                                entry_time += rule->vl.cycle_time;
                        }
                        time += rule->vl.entries[i].interval;
                }
        }

        sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);

        return 0;
err:
        sja1105_free_gating_config(gating_cfg);
        return rc;
}

/* The switch flow classification core implements TTEthernet, which 'thinks' in
 * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
 * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
 * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
 * (Per-Stream Filtering and Policing), which is what this driver implements.
 *
 *                                  VL Lookup
 *         Key = {DMAC && VLANID   +---------+  Key = { (DMAC[47:16] & VLMASK ==
 *                && VLAN PCP      |         |            VLMARKER)
 *                && INGRESS PORT} +---------+           (both fixed)
 *             (exact match,            |        && DMAC[15:0] == VLID
 *          all specified in rule)      |            (specified in rule)
 *                                      v        && INGRESS PORT }
 *                                ------------
 *                     0 (PSFP)  /            \  1 (ARINC664)
 *                  +-----------/  VLLUPFORMAT \----------+
 *                  |           \    (fixed)   /          |
 *                  |            \            /           |
 *   0 (forwarding) v             ------------            |
 *            ------------                                |
 *           /            \  1 (QoS classification)       |
 *      +---/  ISCRITICAL  \-----------+                  |
 *      |   \  (per rule)  /           |                  |
 *      |    \            /    VLID taken from    VLID taken from
 *      v     ------------     index of rule      contents of rule
 *   select                     that matched       that matched
 * DESTPORTS                           |                  |
 *      |                              +---------+--------+
 *      |                                        |
 *      |                                        v
 *      |                                  VL Forwarding
 *      |                                (indexed by VLID)
 *      |                                   +---------+
 *      |                    +--------------|         |
 *      |                    |  select TYPE +---------+
 *      |                    v
 *      |        0 (rate    ------------    1 (time
 *      |     constrained) /            \   triggered)
 *      |          +------/     TYPE     \------------+
 *      |          |      \  (per VLID)  /            |
 *      |          v       \            /             v
 *      |     VL Policing   ------------         VL Policing
 *      |  (indexed by VLID)                   (indexed by VLID)
 *      |      +---------+                        +---------+
 *      |      | TYPE=0  |                        | TYPE=1  |
 *      |      +---------+                        +---------+
 *      |    select SHARINDX                   select SHARINDX to
 *      |     to rate-limit                 re-enter VL Forwarding
 *      |     groups of VL's               with new VLID for egress
 *      |     to same quota                           |
 *      |           |                                 |
 *      |    select MAXLEN -> exceed => drop   select MAXLEN -> exceed => drop
 *      |           |                                 |
 *      |           v                                 v
 *      |     VL Forwarding                     VL Forwarding
 *      |  (indexed by SHARINDX)            (indexed by SHARINDX)
 *      |      +---------+                        +---------+
 *      |      | TYPE=0  |                        | TYPE=1  |
 *      |      +---------+                        +---------+
 *      |    select PRIORITY,                  select PRIORITY,
 *      |  PARTITION, DESTPORTS             PARTITION, DESTPORTS
 *      |           |                                 |
 *      |           v                                 v
 *      |     VL Policing                       VL Policing
 *      |  (indexed by SHARINDX)            (indexed by SHARINDX)
 *      |      +---------+                        +---------+
 *      |      | TYPE=0  |                        | TYPE=1  |
 *      |      +---------+                        +---------+
 *      |           |                                 |
 *      |           v                                 |
 *      |    select BAG, -> exceed => drop            |
 *      |        JITTER                               v
 *      |           |       ----------------------------------------------
 *      |           |      / Reception Window is open for this VL          \
 *      |           |     /  (the Schedule Table executes an entry i        \
 *      |           |    /   M <= i < N, for which these conditions hold):   \  no
 *      |           | +----/                                                  \-+
 *      |           | |yes \  WINST[M] == 1 && WINSTINDEX[M] == VLID         /  |
 *      |           | |     \ WINEND[N] == 1 && WINSTINDEX[N] == VLID       /   |
 *      |           | |      \                                             /    |
 *      |           | |       \ (the VL window has opened and not yet closed)/  |
 *      |           | |        ----------------------------------------------   |
 *      |           | v                                                          v
 *      |           | dispatch to DESTPORTS when the Schedule Table            drop
 *      |           | executes an entry i with TXEN == 1 && VLINDEX == i
 *      v           v
 * dispatch immediately to DESTPORTS
 *
 * The per-port classification key is always composed of {DMAC, VID, PCP} and
 * is non-maskable. This 'looks like' the NULL stream identification function
 * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
 * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
 * ID and PCP, and then the port-based defaults will be used.
 *
 * In TTEthernet, routing is something that needs to be done manually for each
 * Virtual Link. So the flow action must always include one of:
 * a. 'redirect', 'trap' or 'drop': select the egress port list
 * Additionally, the following actions may be applied on a Virtual Link,
 * turning it into 'critical' traffic:
 * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
 *    given by the maximum frame length, bandwidth allocation gap (BAG) and
 *    maximum jitter.
 * c. 'gate': turn it into a time-triggered VL, which can only be received
 *    and forwarded according to a given schedule.
 */

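/* Illustrative usage sketch (assumed tc syntax, not taken from this file): a
 * time-triggered VL keyed on DMAC could be requested from user space roughly
 * like this, within the constraints enforced below (200 ns granularity, at
 * most ~52 ms per entry, IntervalOctetMax of -1, a single IPV for all
 * entries):
 *
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress flower skip_sw dst_mac 42:be:24:9b:76:20 \
 *           action gate base-time 0 \
 *           sched-entry open  60000 -1 -1 \
 *           sched-entry close 40000 -1 -1
 *
 * A non-critical VL (action a. above) maps to 'trap', 'drop' or
 * 'mirred egress redirect' on the same kind of key.
 */
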
static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
                                 struct sja1105_vl_lookup_entry *b)
{
        if (a->macaddr < b->macaddr)
                return true;
        if (a->macaddr > b->macaddr)
                return false;
        if (a->vlanid < b->vlanid)
                return true;
        if (a->vlanid > b->vlanid)
                return false;
        if (a->port < b->port)
                return true;
        if (a->port > b->port)
                return false;
        if (a->vlanprior < b->vlanprior)
                return true;
        if (a->vlanprior > b->vlanprior)
                return false;
        /* Keys are equal */
        return false;
}

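/* Return the tag_8021q VID that traffic on this port is classified to when
 * the port is VLAN-unaware: the standalone VID if the port is not under a
 * bridge, or the per-bridge VID otherwise. Used below as the VLANID field of
 * VLAN-unaware VL Lookup keys.
 */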
/* FIXME: this should change when the bridge upper of the port changes. */
static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
{
        unsigned long bridge_num;

        if (!dp->bridge)
                return dsa_tag_8021q_standalone_vid(dp);

        bridge_num = dsa_port_bridge_num_get(dp);

        return dsa_tag_8021q_bridge_vid(bridge_num);
}

static int sja1105_init_virtual_links(struct sja1105_private *priv,
                                      struct netlink_ext_ack *extack)
{
        struct sja1105_vl_policing_entry *vl_policing;
        struct sja1105_vl_forwarding_entry *vl_fwd;
        struct sja1105_vl_lookup_entry *vl_lookup;
        bool have_critical_virtual_links = false;
        struct sja1105_table *table;
        struct sja1105_rule *rule;
        int num_virtual_links = 0;
        int max_sharindx = 0;
        int i, j, k;

        /* Figure out the dimensioning of the problem */
        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                if (rule->type != SJA1105_RULE_VL)
                        continue;
                /* Each VL lookup entry matches on a single ingress port */
                num_virtual_links += hweight_long(rule->port_mask);

                if (rule->vl.type != SJA1105_VL_NONCRITICAL)
                        have_critical_virtual_links = true;
                if (max_sharindx < rule->vl.sharindx)
                        max_sharindx = rule->vl.sharindx;
        }

        if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
                NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
                return -ENOSPC;
        }

        if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
                NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
                return -ENOSPC;
        }

        max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;

        /* Discard previous VL Lookup Table */
        table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Discard previous VL Policing Table */
        table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Discard previous VL Forwarding Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Discard previous VL Forwarding Parameters Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Nothing to do */
        if (!num_virtual_links)
                return 0;

        /* Pre-allocate space in the static config tables */

        /* VL Lookup Table */
        table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
        table->entries = kcalloc(num_virtual_links,
                                 table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = num_virtual_links;
        vl_lookup = table->entries;

        k = 0;

        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                unsigned long port;

                if (rule->type != SJA1105_RULE_VL)
                        continue;

                for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
                        vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
                        vl_lookup[k].port = port;
                        vl_lookup[k].macaddr = rule->key.vl.dmac;
                        if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
                                vl_lookup[k].vlanid = rule->key.vl.vid;
                                vl_lookup[k].vlanprior = rule->key.vl.pcp;
                        } else {
                                /* FIXME */
                                struct dsa_port *dp = dsa_to_port(priv->ds, port);
                                u16 vid = sja1105_port_get_tag_8021q_vid(dp);

                                vl_lookup[k].vlanid = vid;
                                vl_lookup[k].vlanprior = 0;
                        }
                        /* For critical VLs, the DESTPORTS mask is taken from
                         * the VL Forwarding Table, so no point in putting it
                         * in the VL Lookup Table
                         */
                        if (rule->vl.type == SJA1105_VL_NONCRITICAL)
                                vl_lookup[k].destports = rule->vl.destports;
                        else
                                vl_lookup[k].iscritical = true;
                        vl_lookup[k].flow_cookie = rule->cookie;
                        k++;
                }
        }

        /* UM10944.pdf chapter 4.2.3 VL Lookup table:
         * "the entries in the VL Lookup table must be sorted in ascending
         * order (i.e. the smallest value must be loaded first) according to
         * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
         */
        for (i = 0; i < num_virtual_links; i++) {
                struct sja1105_vl_lookup_entry *a = &vl_lookup[i];

                for (j = i + 1; j < num_virtual_links; j++) {
                        struct sja1105_vl_lookup_entry *b = &vl_lookup[j];

                        if (sja1105_vl_key_lower(b, a)) {
                                struct sja1105_vl_lookup_entry tmp = *a;

                                *a = *b;
                                *b = tmp;
                        }
                }
        }

        if (!have_critical_virtual_links)
                return 0;

        /* VL Policing Table */
        table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
        table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = max_sharindx;
        vl_policing = table->entries;

        /* VL Forwarding Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
        table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = max_sharindx;
        vl_fwd = table->entries;

        /* VL Forwarding Parameters Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
        table->entries = kcalloc(1, table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = 1;

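        /* For each time-triggered VL, the per-VLID policer (index i) points
         * through SHARINDX at a shared entry; the VL Forwarding entry at
         * SHARINDX selects the priority (IPV), partition and destination
         * ports used on egress. Rules installed with the same tc-gate 'index'
         * share that entry.
         */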
        for (i = 0; i < num_virtual_links; i++) {
                unsigned long cookie = vl_lookup[i].flow_cookie;
                struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);

                if (rule->vl.type == SJA1105_VL_NONCRITICAL)
                        continue;
                if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
                        int sharindx = rule->vl.sharindx;

                        vl_policing[i].type = 1;
                        vl_policing[i].sharindx = sharindx;
                        vl_policing[i].maxlen = rule->vl.maxlen;
                        vl_policing[sharindx].type = 1;

                        vl_fwd[i].type = 1;
                        vl_fwd[sharindx].type = 1;
                        vl_fwd[sharindx].priority = rule->vl.ipv;
                        vl_fwd[sharindx].partition = 0;
                        vl_fwd[sharindx].destports = rule->vl.destports;
                }
        }

        sja1105_frame_memory_partitioning(priv);

        return 0;
}

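/* Offload a 'redirect', 'trap' or 'drop' action on a VL key: install (or,
 * when @append is true, extend) a non-critical VL whose DESTPORTS mask is
 * given by @destports. VLAN-aware ports must match on {DMAC, VID, PCP},
 * VLAN-unaware ports on DMAC only.
 */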
int sja1105_vl_redirect(struct sja1105_private *priv, int port,
                        struct netlink_ext_ack *extack, unsigned long cookie,
                        struct sja1105_key *key, unsigned long destports,
                        bool append)
{
        struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
        struct dsa_port *dp = dsa_to_port(priv->ds, port);
        bool vlan_aware = dsa_port_is_vlan_filtering(dp);
        int rc;

        if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on DMAC");
                return -EOPNOTSUPP;
        } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
        }

        if (!rule) {
                rule = kzalloc(sizeof(*rule), GFP_KERNEL);
                if (!rule)
                        return -ENOMEM;

                rule->cookie = cookie;
                rule->type = SJA1105_RULE_VL;
                rule->key = *key;
                list_add(&rule->list, &priv->flow_block.rules);
        }

        rule->port_mask |= BIT(port);
        if (append)
                rule->vl.destports |= destports;
        else
                rule->vl.destports = destports;

        rc = sja1105_init_virtual_links(priv, extack);
        if (rc) {
                rule->port_mask &= ~BIT(port);
                if (!rule->port_mask) {
                        list_del(&rule->list);
                        kfree(rule);
                }
        }

        return rc;
}

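/* Remove @port from a VL rule, freeing the rule once no port references it
 * anymore, then rebuild the gating subschedule, the VL tables and the
 * schedule, and reload the static config.
 */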
int sja1105_vl_delete(struct sja1105_private *priv, int port,
                      struct sja1105_rule *rule, struct netlink_ext_ack *extack)
{
        int rc;

        rule->port_mask &= ~BIT(port);
        if (!rule->port_mask) {
                list_del(&rule->list);
                kfree(rule);
        }

        rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                return rc;

        rc = sja1105_init_virtual_links(priv, extack);
        if (rc)
                return rc;

        rc = sja1105_init_scheduling(priv);
        if (rc < 0)
                return rc;

        return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}

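/* Offload a tc-gate action as a time-triggered VL. Validate that the schedule
 * can be expressed in the 200 ns time base of the switch, that every entry
 * fits in the Schedule Table's delta field (at most ~52 ms) and that a single
 * internal priority value (IPV) is used, then recompute the merged gating
 * subschedule and check it for conflicts with any tc-taprio schedule.
 */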
int sja1105_vl_gate(struct sja1105_private *priv, int port,
                    struct netlink_ext_ack *extack, unsigned long cookie,
                    struct sja1105_key *key, u32 index, s32 prio,
                    u64 base_time, u64 cycle_time, u64 cycle_time_ext,
                    u32 num_entries, struct action_gate_entry *entries)
{
        struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
        struct dsa_port *dp = dsa_to_port(priv->ds, port);
        bool vlan_aware = dsa_port_is_vlan_filtering(dp);
        int ipv = -1;
        int i, rc;
        s32 rem;

        if (cycle_time_ext) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Cycle time extension not supported");
                return -EOPNOTSUPP;
        }

        div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
        if (rem) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Base time must be multiple of 200 ns");
                return -ERANGE;
        }

        div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
        if (rem) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Cycle time must be multiple of 200 ns");
                return -ERANGE;
        }

        if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on DMAC");
                return -EOPNOTSUPP;
        } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
        }

        if (!rule) {
                rule = kzalloc(sizeof(*rule), GFP_KERNEL);
                if (!rule)
                        return -ENOMEM;

                list_add(&rule->list, &priv->flow_block.rules);
                rule->cookie = cookie;
                rule->type = SJA1105_RULE_VL;
                rule->key = *key;
                rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
                rule->vl.sharindx = index;
                rule->vl.base_time = base_time;
                rule->vl.cycle_time = cycle_time;
                rule->vl.num_entries = num_entries;
                rule->vl.entries = kcalloc(num_entries,
                                           sizeof(struct action_gate_entry),
                                           GFP_KERNEL);
                if (!rule->vl.entries) {
                        rc = -ENOMEM;
                        goto out;
                }

                for (i = 0; i < num_entries; i++) {
                        div_s64_rem(entries[i].interval,
                                    sja1105_delta_to_ns(1), &rem);
                        if (rem) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Interval must be multiple of 200 ns");
                                rc = -ERANGE;
                                goto out;
                        }

                        if (!entries[i].interval) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Interval cannot be zero");
                                rc = -ERANGE;
                                goto out;
                        }

                        if (ns_to_sja1105_delta(entries[i].interval) >
                            SJA1105_TAS_MAX_DELTA) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Maximum interval is 52 ms");
                                rc = -ERANGE;
                                goto out;
                        }

                        if (entries[i].maxoctets != -1) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Cannot offload IntervalOctetMax");
                                rc = -EOPNOTSUPP;
                                goto out;
                        }

                        if (ipv == -1) {
                                ipv = entries[i].ipv;
                        } else if (ipv != entries[i].ipv) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Only support a single IPV per VL");
                                rc = -EOPNOTSUPP;
                                goto out;
                        }

                        rule->vl.entries[i] = entries[i];
                }

                if (ipv == -1) {
                        if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
                                ipv = key->vl.pcp;
                        else
                                ipv = 0;
                }

                /* TODO: support per-flow MTU */
                rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
                rule->vl.ipv = ipv;
        }

        rule->port_mask |= BIT(port);

        rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                goto out;

        rc = sja1105_init_virtual_links(priv, extack);
        if (rc)
                goto out;

        if (sja1105_gating_check_conflicts(priv, -1, extack)) {
                NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
                rc = -ERANGE;
                goto out;
        }

out:
        if (rc) {
                rule->port_mask &= ~BIT(port);
                if (!rule->port_mask) {
                        list_del(&rule->list);
                        kfree(rule->vl.entries);
                        kfree(rule);
                }
        }

        return rc;
}

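/* Return the VL Lookup index (VLID) at which the rule matching @port and @key
 * was installed, or -1 if not found. The lookup entries are kept sorted by
 * key, so the index is not stable across rule installations and must be
 * searched for.
 */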
static int sja1105_find_vlid(struct sja1105_private *priv, int port,
                             struct sja1105_key *key)
{
        struct sja1105_vl_lookup_entry *vl_lookup;
        struct sja1105_table *table;
        int i;

        if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
                    key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
                return -1;

        table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
        vl_lookup = table->entries;

        for (i = 0; i < table->entry_count; i++) {
                if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
                        if (vl_lookup[i].port == port &&
                            vl_lookup[i].macaddr == key->vl.dmac &&
                            vl_lookup[i].vlanid == key->vl.vid &&
                            vl_lookup[i].vlanprior == key->vl.pcp)
                                return i;
                } else {
                        if (vl_lookup[i].port == port &&
                            vl_lookup[i].macaddr == key->vl.dmac)
                                return i;
                }
        }

        return -1;
}

int sja1105_vl_stats(struct sja1105_private *priv, int port,
                     struct sja1105_rule *rule, struct flow_stats *stats,
                     struct netlink_ext_ack *extack)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
        u64 unreleased;
        u64 timingerr;
        u64 lengtherr;
        int vlid, rc;
        u64 pkts;

        if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
                return 0;

        vlid = sja1105_find_vlid(priv, port, &rule->key);
        if (vlid < 0)
                return 0;

        rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
                              SJA1105_SIZE_VL_STATUS);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
                return rc;
        }

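        /* The 8-byte VL status word holds three 16-bit counters, extracted
         * below as unreleased, timingerr and lengtherr; their sum is reported
         * as the number of packets seen by this rule.
         */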
        sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
        sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
        sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);

        pkts = timingerr + unreleased + lengtherr;

        flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
                          jiffies - rule->vl.stats.lastused,
                          FLOW_ACTION_HW_STATS_IMMEDIATE);

        rule->vl.stats.pkts = pkts;
        rule->vl.stats.lastused = jiffies;

        return 0;
}
