1 | // SPDX-License-Identifier: GPL-2.0 |
2 | |
3 | /* net/sched/sch_taprio.c Time Aware Priority Scheduler |
4 | * |
5 | * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com> |
6 | * |
7 | */ |
8 | |
9 | #include <linux/ethtool.h> |
10 | #include <linux/ethtool_netlink.h> |
11 | #include <linux/types.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/string.h> |
15 | #include <linux/list.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/skbuff.h> |
18 | #include <linux/math64.h> |
19 | #include <linux/module.h> |
20 | #include <linux/spinlock.h> |
21 | #include <linux/rcupdate.h> |
22 | #include <linux/time.h> |
23 | #include <net/gso.h> |
24 | #include <net/netlink.h> |
25 | #include <net/pkt_sched.h> |
26 | #include <net/pkt_cls.h> |
27 | #include <net/sch_generic.h> |
28 | #include <net/sock.h> |
29 | #include <net/tcp.h> |
30 | |
31 | #define TAPRIO_STAT_NOT_SET (~0ULL) |
32 | |
33 | #include "sch_mqprio_lib.h" |
34 | |
35 | static LIST_HEAD(taprio_list); |
36 | static struct static_key_false taprio_have_broken_mqprio; |
37 | static struct static_key_false taprio_have_working_mqprio; |
38 | |
39 | #define TAPRIO_ALL_GATES_OPEN -1 |
40 | |
41 | #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) |
42 | #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) |
43 | #define TAPRIO_SUPPORTED_FLAGS \ |
44 | (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) |
45 | #define TAPRIO_FLAGS_INVALID U32_MAX |
46 | |
47 | struct sched_entry { |
48 | /* Durations between this GCL entry and the GCL entry where the |
49 | * respective traffic class gate closes |
50 | */ |
51 | u64 gate_duration[TC_MAX_QUEUE]; |
52 | atomic_t budget[TC_MAX_QUEUE]; |
53 | /* The qdisc makes some effort so that no packet leaves |
54 | * after this time |
55 | */ |
56 | ktime_t gate_close_time[TC_MAX_QUEUE]; |
57 | struct list_head list; |
58 | /* Used to calculate when to advance the schedule */ |
59 | ktime_t end_time; |
60 | ktime_t next_txtime; |
61 | int index; |
62 | u32 gate_mask; |
63 | u32 interval; |
64 | u8 command; |
65 | }; |
66 | |
67 | struct sched_gate_list { |
68 | /* Longest non-zero contiguous gate durations per traffic class, |
69 | * or 0 if a traffic class gate never opens during the schedule. |
70 | */ |
71 | u64 max_open_gate_duration[TC_MAX_QUEUE]; |
72 | u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */ |
73 | u32 max_sdu[TC_MAX_QUEUE]; /* for dump */ |
74 | struct rcu_head rcu; |
75 | struct list_head entries; |
76 | size_t num_entries; |
77 | ktime_t cycle_end_time; |
78 | s64 cycle_time; |
79 | s64 cycle_time_extension; |
80 | s64 base_time; |
81 | }; |
82 | |
83 | struct taprio_sched { |
84 | struct Qdisc **qdiscs; |
85 | struct Qdisc *root; |
86 | u32 flags; |
87 | enum tk_offsets tk_offset; |
88 | int clockid; |
89 | bool offloaded; |
90 | bool detected_mqprio; |
91 | bool broken_mqprio; |
92 | atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ |
93 | * speeds it's sub-nanoseconds per byte |
94 | */ |
95 | |
96 | /* Protects the update side of the RCU protected current_entry */ |
97 | spinlock_t current_entry_lock; |
98 | struct sched_entry __rcu *current_entry; |
99 | struct sched_gate_list __rcu *oper_sched; |
100 | struct sched_gate_list __rcu *admin_sched; |
101 | struct hrtimer advance_timer; |
102 | struct list_head taprio_list; |
103 | int cur_txq[TC_MAX_QUEUE]; |
104 | u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */ |
105 | u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */ |
106 | u32 txtime_delay; |
107 | }; |
108 | |
109 | struct __tc_taprio_qopt_offload { |
110 | refcount_t users; |
111 | struct tc_taprio_qopt_offload offload; |
112 | }; |
113 | |
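/* Illustrative example of the computation below: given the entries
 * { gate_mask = 0x1, interval = 100 us }, { gate_mask = 0x3, interval = 200 us },
 * { gate_mask = 0x2, interval = 300 us }, the first entry ends up with
 * gate_duration[0] = 100 us + 200 us = 300 us (TC 0 stays open across the
 * second entry) and gate_duration[1] = 0, since TC 1's gate is closed when
 * the first entry begins.
 */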
114 | static void taprio_calculate_gate_durations(struct taprio_sched *q, |
115 | struct sched_gate_list *sched) |
116 | { |
117 | struct net_device *dev = qdisc_dev(q->root); |
118 | int num_tc = netdev_get_num_tc(dev); |
119 | struct sched_entry *entry, *cur; |
120 | int tc; |
121 | |
122 | list_for_each_entry(entry, &sched->entries, list) { |
123 | u32 gates_still_open = entry->gate_mask; |
124 | |
125 | /* For each traffic class, calculate each open gate duration, |
126 | * starting at this schedule entry and ending at the schedule |
127 | * entry containing a gate close event for that TC. |
128 | */ |
129 | cur = entry; |
130 | |
131 | do { |
132 | if (!gates_still_open) |
133 | break; |
134 | |
135 | for (tc = 0; tc < num_tc; tc++) { |
136 | if (!(gates_still_open & BIT(tc))) |
137 | continue; |
138 | |
139 | if (cur->gate_mask & BIT(tc)) |
140 | entry->gate_duration[tc] += cur->interval; |
141 | else |
142 | gates_still_open &= ~BIT(tc); |
143 | } |
144 | |
145 | cur = list_next_entry_circular(cur, &sched->entries, list); |
146 | } while (cur != entry); |
147 | |
148 | /* Keep track of the maximum gate duration for each traffic |
149 | * class, taking care to not confuse a traffic class which is |
150 | * temporarily closed with one that is always closed. |
151 | */ |
152 | for (tc = 0; tc < num_tc; tc++) |
153 | if (entry->gate_duration[tc] && |
154 | sched->max_open_gate_duration[tc] < entry->gate_duration[tc]) |
155 | sched->max_open_gate_duration[tc] = entry->gate_duration[tc]; |
156 | } |
157 | } |
158 | |
159 | static bool taprio_entry_allows_tx(ktime_t skb_end_time, |
160 | struct sched_entry *entry, int tc) |
161 | { |
162 | return ktime_before(skb_end_time, entry->gate_close_time[tc]); |
163 | } |
164 | |
165 | static ktime_t sched_base_time(const struct sched_gate_list *sched) |
166 | { |
167 | if (!sched) |
168 | return KTIME_MAX; |
169 | |
170 | return ns_to_ktime(sched->base_time); |
171 | } |
172 | |
173 | static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono) |
174 | { |
175 | /* This pairs with WRITE_ONCE() in taprio_parse_clockid() */ |
176 | enum tk_offsets tk_offset = READ_ONCE(q->tk_offset); |
177 | |
178 | switch (tk_offset) { |
179 | case TK_OFFS_MAX: |
180 | return mono; |
181 | default: |
182 | return ktime_mono_to_any(mono, tk_offset); |
183 | } |
184 | } |
185 | |
186 | static ktime_t taprio_get_time(const struct taprio_sched *q) |
187 | { |
188 | return taprio_mono_to_any(q, ktime_get()); |
189 | } |
190 | |
191 | static void taprio_free_sched_cb(struct rcu_head *head) |
192 | { |
193 | struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu); |
194 | struct sched_entry *entry, *n; |
195 | |
196 | list_for_each_entry_safe(entry, n, &sched->entries, list) { |
197 | list_del(&entry->list); |
198 | kfree(entry); |
199 | } |
200 | |
201 | kfree(sched); |
202 | } |
203 | |
204 | static void switch_schedules(struct taprio_sched *q, |
205 | struct sched_gate_list **admin, |
206 | struct sched_gate_list **oper) |
207 | { |
208 | rcu_assign_pointer(q->oper_sched, *admin); |
209 | rcu_assign_pointer(q->admin_sched, NULL); |
210 | |
211 | if (*oper) |
212 | call_rcu(&(*oper)->rcu, taprio_free_sched_cb); |
213 | |
214 | *oper = *admin; |
215 | *admin = NULL; |
216 | } |
217 | |
218 | /* Get how much time has been already elapsed in the current cycle. */ |
219 | static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time) |
220 | { |
221 | ktime_t time_since_sched_start; |
222 | s32 time_elapsed; |
223 | |
224 | time_since_sched_start = ktime_sub(time, sched->base_time); |
225 | div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed); |
226 | |
227 | return time_elapsed; |
228 | } |
229 | |
230 | static ktime_t get_interval_end_time(struct sched_gate_list *sched, |
231 | struct sched_gate_list *admin, |
232 | struct sched_entry *entry, |
233 | ktime_t intv_start) |
234 | { |
235 | s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start); |
236 | ktime_t intv_end, cycle_ext_end, cycle_end; |
237 | |
238 | cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed); |
239 | intv_end = ktime_add_ns(intv_start, entry->interval); |
240 | cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension); |
241 | |
242 | if (ktime_before(intv_end, cycle_end)) |
243 | return intv_end; |
244 | else if (admin && admin != sched && |
245 | ktime_after(admin->base_time, cycle_end) && |
246 | ktime_before(admin->base_time, cycle_ext_end)) |
247 | return admin->base_time; |
248 | else |
249 | return cycle_end; |
250 | } |
251 | |
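/* Helpers converting between frame length (bytes) and wire time (ns),
 * based on q->picos_per_byte. Worked example, assuming a 1 Gbps link:
 * picos_per_byte = (USEC_PER_SEC * 8) / 1000 = 8000, so a 1500 byte frame
 * corresponds to 1500 * 8000 / PSEC_PER_NSEC = 12000 ns (12 us).
 */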
252 | static int length_to_duration(struct taprio_sched *q, int len) |
253 | { |
254 | return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC); |
255 | } |
256 | |
257 | static int duration_to_length(struct taprio_sched *q, u64 duration) |
258 | { |
259 | return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte)); |
260 | } |
261 | |
262 | /* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the |
263 | * q->max_sdu[] requested by the user and the max_sdu dynamically determined by |
264 | * the maximum open gate durations at the given link speed. |
265 | */ |
266 | static void taprio_update_queue_max_sdu(struct taprio_sched *q, |
267 | struct sched_gate_list *sched, |
268 | struct qdisc_size_table *stab) |
269 | { |
270 | struct net_device *dev = qdisc_dev(q->root); |
271 | int num_tc = netdev_get_num_tc(dev); |
272 | u32 max_sdu_from_user; |
273 | u32 max_sdu_dynamic; |
274 | u32 max_sdu; |
275 | int tc; |
276 | |
277 | for (tc = 0; tc < num_tc; tc++) { |
278 | max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX; |
279 | |
280 | /* TC gate never closes => keep the queueMaxSDU |
281 | * selected by the user |
282 | */ |
283 | if (sched->max_open_gate_duration[tc] == sched->cycle_time) { |
284 | max_sdu_dynamic = U32_MAX; |
285 | } else { |
286 | u32 max_frm_len; |
287 | |
288 | max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]); |
289 | /* Compensate for L1 overhead from size table, |
290 | * but don't let the frame size go negative |
291 | */ |
292 | if (stab) { |
293 | max_frm_len -= stab->szopts.overhead; |
294 | max_frm_len = max_t(int, max_frm_len, |
295 | dev->hard_header_len + 1); |
296 | } |
297 | max_sdu_dynamic = max_frm_len - dev->hard_header_len; |
298 | if (max_sdu_dynamic > dev->max_mtu) |
299 | max_sdu_dynamic = U32_MAX; |
300 | } |
301 | |
302 | max_sdu = min(max_sdu_dynamic, max_sdu_from_user); |
303 | |
304 | if (max_sdu != U32_MAX) { |
305 | sched->max_frm_len[tc] = max_sdu + dev->hard_header_len; |
306 | sched->max_sdu[tc] = max_sdu; |
307 | } else { |
308 | sched->max_frm_len[tc] = U32_MAX; /* never oversized */ |
309 | sched->max_sdu[tc] = 0; |
310 | } |
311 | } |
312 | } |
313 | |
314 | /* Returns the entry corresponding to next available interval. If |
315 | * validate_interval is set, it only validates whether the timestamp occurs |
316 | * when the gate corresponding to the skb's traffic class is open. |
317 | */ |
318 | static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb, |
319 | struct Qdisc *sch, |
320 | struct sched_gate_list *sched, |
321 | struct sched_gate_list *admin, |
322 | ktime_t time, |
323 | ktime_t *interval_start, |
324 | ktime_t *interval_end, |
325 | bool validate_interval) |
326 | { |
327 | ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time; |
328 | ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time; |
329 | struct sched_entry *entry = NULL, *entry_found = NULL; |
330 | struct taprio_sched *q = qdisc_priv(sch); |
331 | struct net_device *dev = qdisc_dev(sch); |
332 | bool entry_available = false; |
333 | s32 cycle_elapsed; |
334 | int tc, n; |
335 | |
336 | tc = netdev_get_prio_tc_map(dev, skb->priority); |
337 | packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb)); |
338 | |
339 | *interval_start = 0; |
340 | *interval_end = 0; |
341 | |
342 | if (!sched) |
343 | return NULL; |
344 | |
345 | cycle = sched->cycle_time; |
346 | cycle_elapsed = get_cycle_time_elapsed(sched, time); |
347 | curr_intv_end = ktime_sub_ns(time, cycle_elapsed); |
348 | cycle_end = ktime_add_ns(curr_intv_end, cycle); |
349 | |
350 | list_for_each_entry(entry, &sched->entries, list) { |
351 | curr_intv_start = curr_intv_end; |
352 | curr_intv_end = get_interval_end_time(sched, admin, entry, |
353 | curr_intv_start); |
354 | |
355 | if (ktime_after(curr_intv_start, cycle_end)) |
356 | break; |
357 | |
358 | if (!(entry->gate_mask & BIT(tc)) || |
359 | packet_transmit_time > entry->interval) |
360 | continue; |
361 | |
362 | txtime = entry->next_txtime; |
363 | |
364 | if (ktime_before(txtime, time) || validate_interval) { |
365 | transmit_end_time = ktime_add_ns(time, packet_transmit_time); |
366 | if ((ktime_before(curr_intv_start, time) && |
367 | ktime_before(transmit_end_time, curr_intv_end)) || |
368 | (ktime_after(curr_intv_start, time) && !validate_interval)) { |
369 | entry_found = entry; |
370 | *interval_start = curr_intv_start; |
371 | *interval_end = curr_intv_end; |
372 | break; |
373 | } else if (!entry_available && !validate_interval) { |
374 | /* Here, we are just trying to find out the |
375 | * first available interval in the next cycle. |
376 | */ |
377 | entry_available = true; |
378 | entry_found = entry; |
379 | *interval_start = ktime_add_ns(curr_intv_start, cycle); |
380 | *interval_end = ktime_add_ns(curr_intv_end, cycle); |
381 | } |
382 | } else if (ktime_before(txtime, earliest_txtime) && |
383 | !entry_available) { |
384 | earliest_txtime = txtime; |
385 | entry_found = entry; |
386 | n = div_s64(ktime_sub(txtime, curr_intv_start), cycle); |
387 | *interval_start = ktime_add(curr_intv_start, n * cycle); |
388 | *interval_end = ktime_add(curr_intv_end, n * cycle); |
389 | } |
390 | } |
391 | |
392 | return entry_found; |
393 | } |
394 | |
395 | static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch) |
396 | { |
397 | struct taprio_sched *q = qdisc_priv(sch); |
398 | struct sched_gate_list *sched, *admin; |
399 | ktime_t interval_start, interval_end; |
400 | struct sched_entry *entry; |
401 | |
402 | rcu_read_lock(); |
403 | sched = rcu_dereference(q->oper_sched); |
404 | admin = rcu_dereference(q->admin_sched); |
405 | |
406 | entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp, |
407 | &interval_start, &interval_end, true); |
408 | rcu_read_unlock(); |
409 | |
410 | return entry; |
411 | } |
412 | |
413 | /* This returns the tstamp value set by TCP in terms of the set clock. */ |
414 | static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb) |
415 | { |
416 | unsigned int offset = skb_network_offset(skb); |
417 | const struct ipv6hdr *ipv6h; |
418 | const struct iphdr *iph; |
419 | struct ipv6hdr _ipv6h; |
420 | |
421 | ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); |
422 | if (!ipv6h) |
423 | return 0; |
424 | |
425 | if (ipv6h->version == 4) { |
426 | iph = (struct iphdr *)ipv6h; |
427 | offset += iph->ihl * 4; |
428 | |
429 | /* special-case 6in4 tunnelling, as that is a common way to get |
430 | * v6 connectivity in the home |
431 | */ |
432 | if (iph->protocol == IPPROTO_IPV6) { |
433 | ipv6h = skb_header_pointer(skb, offset, |
434 | sizeof(_ipv6h), &_ipv6h); |
435 | |
436 | if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP) |
437 | return 0; |
438 | } else if (iph->protocol != IPPROTO_TCP) { |
439 | return 0; |
440 | } |
441 | } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) { |
442 | return 0; |
443 | } |
444 | |
445 | return taprio_mono_to_any(q, skb->skb_mstamp_ns); |
446 | } |
447 | |
448 | /* There are a few scenarios where we will have to modify the txtime from |
449 | * what is read from next_txtime in sched_entry. They are: |
450 | * 1. If txtime is in the past, |
451 | * a. The gate for the traffic class is currently open and packet can be |
452 | * transmitted before it closes, schedule the packet right away. |
453 | * b. If the gate corresponding to the traffic class is going to open later |
454 | * in the cycle, set the txtime of packet to the interval start. |
455 | * 2. If txtime is in the future, there are packets corresponding to the |
456 | * current traffic class waiting to be transmitted. So, the following |
457 | * possibilities exist: |
458 | * a. We can transmit the packet before the window containing the txtime |
459 | * closes. |
460 | * b. The window might close before the transmission can be completed |
461 | * successfully. So, schedule the packet in the next open window. |
462 | */ |
463 | static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch) |
464 | { |
465 | ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp; |
466 | struct taprio_sched *q = qdisc_priv(sch); |
467 | struct sched_gate_list *sched, *admin; |
468 | ktime_t minimum_time, now, txtime; |
469 | int len, packet_transmit_time; |
470 | struct sched_entry *entry; |
471 | bool sched_changed; |
472 | |
473 | now = taprio_get_time(q); |
474 | minimum_time = ktime_add_ns(now, q->txtime_delay); |
475 | |
476 | tcp_tstamp = get_tcp_tstamp(q, skb); |
477 | minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp); |
478 | |
479 | rcu_read_lock(); |
480 | admin = rcu_dereference(q->admin_sched); |
481 | sched = rcu_dereference(q->oper_sched); |
482 | if (admin && ktime_after(minimum_time, admin->base_time)) |
483 | switch_schedules(q, &admin, &sched); |
484 | |
485 | /* Until the schedule starts, all the queues are open */ |
486 | if (!sched || ktime_before(minimum_time, sched->base_time)) { |
487 | txtime = minimum_time; |
488 | goto done; |
489 | } |
490 | |
491 | len = qdisc_pkt_len(skb); |
492 | packet_transmit_time = length_to_duration(q, len); |
493 | |
494 | do { |
495 | sched_changed = false; |
496 | |
497 | entry = find_entry_to_transmit(skb, sch, sched, admin, |
498 | minimum_time, |
499 | &interval_start, &interval_end, |
500 | false); |
501 | if (!entry) { |
502 | txtime = 0; |
503 | goto done; |
504 | } |
505 | |
506 | txtime = entry->next_txtime; |
507 | txtime = max_t(ktime_t, txtime, minimum_time); |
508 | txtime = max_t(ktime_t, txtime, interval_start); |
509 | |
510 | if (admin && admin != sched && |
511 | ktime_after(txtime, admin->base_time)) { |
512 | sched = admin; |
513 | sched_changed = true; |
514 | continue; |
515 | } |
516 | |
517 | transmit_end_time = ktime_add(txtime, packet_transmit_time); |
518 | minimum_time = transmit_end_time; |
519 | |
520 | /* Update the txtime of the current entry to the next time its |
521 | * interval starts. |
522 | */ |
523 | if (ktime_after(transmit_end_time, interval_end)) |
524 | entry->next_txtime = ktime_add(interval_start, sched->cycle_time); |
525 | } while (sched_changed || ktime_after(transmit_end_time, interval_end)); |
526 | |
527 | entry->next_txtime = transmit_end_time; |
528 | |
529 | done: |
530 | rcu_read_unlock(); |
531 | return txtime; |
532 | } |
533 | |
534 | /* Devices with full offload are expected to honor this in hardware */ |
535 | static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch, |
536 | struct sk_buff *skb) |
537 | { |
538 | struct taprio_sched *q = qdisc_priv(sch); |
539 | struct net_device *dev = qdisc_dev(sch); |
540 | struct sched_gate_list *sched; |
541 | int prio = skb->priority; |
542 | bool exceeds = false; |
543 | u8 tc; |
544 | |
545 | tc = netdev_get_prio_tc_map(dev, prio); |
546 | |
547 | rcu_read_lock(); |
548 | sched = rcu_dereference(q->oper_sched); |
549 | if (sched && skb->len > sched->max_frm_len[tc]) |
550 | exceeds = true; |
551 | rcu_read_unlock(); |
552 | |
553 | return exceeds; |
554 | } |
555 | |
556 | static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch, |
557 | struct Qdisc *child, struct sk_buff **to_free) |
558 | { |
559 | struct taprio_sched *q = qdisc_priv(sch); |
560 | |
561 | /* sk_flags are only safe to use on full sockets. */ |
562 | if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) { |
563 | if (!is_valid_interval(skb, sch)) |
564 | return qdisc_drop(skb, sch, to_free); |
565 | } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { |
566 | skb->tstamp = get_packet_txtime(skb, sch); |
567 | if (!skb->tstamp) |
568 | return qdisc_drop(skb, sch, to_free); |
569 | } |
570 | |
571 | qdisc_qstats_backlog_inc(sch, skb); |
572 | sch->q.qlen++; |
573 | |
574 | return qdisc_enqueue(skb, child, to_free); |
575 | } |
576 | |
577 | static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch, |
578 | struct Qdisc *child, |
579 | struct sk_buff **to_free) |
580 | { |
581 | unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb); |
582 | netdev_features_t features = netif_skb_features(skb); |
583 | struct sk_buff *segs, *nskb; |
584 | int ret; |
585 | |
586 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); |
587 | if (IS_ERR_OR_NULL(segs)) |
588 | return qdisc_drop(skb, sch, to_free); |
589 | |
590 | skb_list_walk_safe(segs, segs, nskb) { |
591 | skb_mark_not_on_list(segs); |
592 | qdisc_skb_cb(segs)->pkt_len = segs->len; |
593 | slen += segs->len; |
594 | |
595 | /* FIXME: we should be segmenting to a smaller size |
596 | * rather than dropping these |
597 | */ |
598 | if (taprio_skb_exceeds_queue_max_sdu(sch, segs)) |
599 | ret = qdisc_drop(segs, sch, to_free); |
600 | else |
601 | ret = taprio_enqueue_one(segs, sch, child, to_free); |
602 | |
603 | if (ret != NET_XMIT_SUCCESS) { |
604 | if (net_xmit_drop_count(ret)) |
605 | qdisc_qstats_drop(sch); |
606 | } else { |
607 | numsegs++; |
608 | } |
609 | } |
610 | |
611 | if (numsegs > 1) |
612 | qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen); |
613 | consume_skb(skb); |
614 | |
615 | return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; |
616 | } |
617 | |
618 | /* Will not be called in the full offload case, since the TX queues are |
619 | * attached to the Qdisc created using qdisc_create_dflt() |
620 | */ |
621 | static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
622 | struct sk_buff **to_free) |
623 | { |
624 | struct taprio_sched *q = qdisc_priv(sch); |
625 | struct Qdisc *child; |
626 | int queue; |
627 | |
628 | queue = skb_get_queue_mapping(skb); |
629 | |
630 | child = q->qdiscs[queue]; |
631 | if (unlikely(!child)) |
632 | return qdisc_drop(skb, sch, to_free); |
633 | |
634 | if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) { |
635 | /* Large packets might not be transmitted when the transmission |
636 | * duration exceeds any configured interval. Therefore, segment |
637 | * the skb into smaller chunks. Drivers with full offload are |
638 | * expected to handle this in hardware. |
639 | */ |
640 | if (skb_is_gso(skb)) |
641 | return taprio_enqueue_segmented(skb, sch, child, |
642 | to_free); |
643 | |
644 | return qdisc_drop(skb, sch, to_free); |
645 | } |
646 | |
647 | return taprio_enqueue_one(skb, sch, child, to_free); |
648 | } |
649 | |
650 | static struct sk_buff *taprio_peek(struct Qdisc *sch) |
651 | { |
652 | WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented"); |
653 | return NULL; |
654 | } |
655 | |
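/* Budgets are expressed in bytes: how many bytes fit in the remaining open
 * gate duration at the current link speed. Worked example, assuming 1 Gbps
 * (8000 ps per byte): a gate_duration of 500 us yields a budget of
 * 500000 ns * 1000 / 8000 = 62500 bytes.
 */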
656 | static void taprio_set_budgets(struct taprio_sched *q, |
657 | struct sched_gate_list *sched, |
658 | struct sched_entry *entry) |
659 | { |
660 | struct net_device *dev = qdisc_dev(q->root); |
661 | int num_tc = netdev_get_num_tc(dev); |
662 | int tc, budget; |
663 | |
664 | for (tc = 0; tc < num_tc; tc++) { |
665 | /* Traffic classes which never close have infinite budget */ |
666 | if (entry->gate_duration[tc] == sched->cycle_time) |
667 | budget = INT_MAX; |
668 | else |
669 | budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC, |
670 | atomic64_read(&q->picos_per_byte)); |
671 | |
672 | atomic_set(&entry->budget[tc], budget); |
673 | } |
674 | } |
675 | |
676 | /* When an skb is sent, it consumes from the budget of all traffic classes */ |
677 | static int taprio_update_budgets(struct sched_entry *entry, size_t len, |
678 | int tc_consumed, int num_tc) |
679 | { |
680 | int tc, budget, new_budget = 0; |
681 | |
682 | for (tc = 0; tc < num_tc; tc++) { |
683 | budget = atomic_read(&entry->budget[tc]); |
684 | /* Don't consume from infinite budget */ |
685 | if (budget == INT_MAX) { |
686 | if (tc == tc_consumed) |
687 | new_budget = budget; |
688 | continue; |
689 | } |
690 | |
691 | if (tc == tc_consumed) |
692 | new_budget = atomic_sub_return(len, &entry->budget[tc]); |
693 | else |
694 | atomic_sub(len, &entry->budget[tc]); |
695 | } |
696 | |
697 | return new_budget; |
698 | } |
699 | |
700 | static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq, |
701 | struct sched_entry *entry, |
702 | u32 gate_mask) |
703 | { |
704 | struct taprio_sched *q = qdisc_priv(sch); |
705 | struct net_device *dev = qdisc_dev(sch); |
706 | struct Qdisc *child = q->qdiscs[txq]; |
707 | int num_tc = netdev_get_num_tc(dev); |
708 | struct sk_buff *skb; |
709 | ktime_t guard; |
710 | int prio; |
711 | int len; |
712 | u8 tc; |
713 | |
714 | if (unlikely(!child)) |
715 | return NULL; |
716 | |
717 | if (TXTIME_ASSIST_IS_ENABLED(q->flags)) |
718 | goto skip_peek_checks; |
719 | |
720 | skb = child->ops->peek(child); |
721 | if (!skb) |
722 | return NULL; |
723 | |
724 | prio = skb->priority; |
725 | tc = netdev_get_prio_tc_map(dev, prio); |
726 | |
727 | if (!(gate_mask & BIT(tc))) |
728 | return NULL; |
729 | |
730 | len = qdisc_pkt_len(skb); |
731 | guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len)); |
732 | |
733 | /* In the case that there's no gate entry, there's no |
734 | * guard band ... |
735 | */ |
736 | if (gate_mask != TAPRIO_ALL_GATES_OPEN && |
737 | !taprio_entry_allows_tx(guard, entry, tc)) |
738 | return NULL; |
739 | |
740 | /* ... and no budget. */ |
741 | if (gate_mask != TAPRIO_ALL_GATES_OPEN && |
742 | taprio_update_budgets(entry, len, tc, num_tc) < 0) |
743 | return NULL; |
744 | |
745 | skip_peek_checks: |
746 | skb = child->ops->dequeue(child); |
747 | if (unlikely(!skb)) |
748 | return NULL; |
749 | |
750 | qdisc_bstats_update(sch, skb); |
751 | qdisc_qstats_backlog_dec(sch, skb); |
752 | sch->q.qlen--; |
753 | |
754 | return skb; |
755 | } |
756 | |
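/* Advance to the next TXQ belonging to the same traffic class, wrapping
 * around within the range [offset, offset + count).
 */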
757 | static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq) |
758 | { |
759 | int offset = dev->tc_to_txq[tc].offset; |
760 | int count = dev->tc_to_txq[tc].count; |
761 | |
762 | (*txq)++; |
763 | if (*txq == offset + count) |
764 | *txq = offset; |
765 | } |
766 | |
767 | /* Prioritize higher traffic classes, and select among TXQs belonging to the |
768 | * same TC using round robin |
769 | */ |
770 | static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch, |
771 | struct sched_entry *entry, |
772 | u32 gate_mask) |
773 | { |
774 | struct taprio_sched *q = qdisc_priv(sch); |
775 | struct net_device *dev = qdisc_dev(sch); |
776 | int num_tc = netdev_get_num_tc(dev); |
777 | struct sk_buff *skb; |
778 | int tc; |
779 | |
780 | for (tc = num_tc - 1; tc >= 0; tc--) { |
781 | int first_txq = q->cur_txq[tc]; |
782 | |
783 | if (!(gate_mask & BIT(tc))) |
784 | continue; |
785 | |
786 | do { |
787 | skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc], |
788 | entry, gate_mask); |
789 | |
790 | taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]); |
791 | |
792 | if (q->cur_txq[tc] >= dev->num_tx_queues) |
793 | q->cur_txq[tc] = first_txq; |
794 | |
795 | if (skb) |
796 | return skb; |
797 | } while (q->cur_txq[tc] != first_txq); |
798 | } |
799 | |
800 | return NULL; |
801 | } |
802 | |
803 | /* Broken way of prioritizing smaller TXQ indices and ignoring the traffic |
804 | * class other than to determine whether the gate is open or not |
805 | */ |
806 | static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch, |
807 | struct sched_entry *entry, |
808 | u32 gate_mask) |
809 | { |
810 | struct net_device *dev = qdisc_dev(sch); |
811 | struct sk_buff *skb; |
812 | int i; |
813 | |
814 | for (i = 0; i < dev->num_tx_queues; i++) { |
815 | skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask); |
816 | if (skb) |
817 | return skb; |
818 | } |
819 | |
820 | return NULL; |
821 | } |
822 | |
823 | /* Will not be called in the full offload case, since the TX queues are |
824 | * attached to the Qdisc created using qdisc_create_dflt() |
825 | */ |
826 | static struct sk_buff *taprio_dequeue(struct Qdisc *sch) |
827 | { |
828 | struct taprio_sched *q = qdisc_priv(sch); |
829 | struct sk_buff *skb = NULL; |
830 | struct sched_entry *entry; |
831 | u32 gate_mask; |
832 | |
833 | rcu_read_lock(); |
834 | entry = rcu_dereference(q->current_entry); |
835 | /* if there's no entry, it means that the schedule didn't |
836 | * start yet, so force all gates to be open; this is in |
837 | * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5 |
838 | * "AdminGateStates" |
839 | */ |
840 | gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN; |
841 | if (!gate_mask) |
842 | goto done; |
843 | |
844 | if (static_branch_unlikely(&taprio_have_broken_mqprio) && |
845 | !static_branch_likely(&taprio_have_working_mqprio)) { |
846 | /* Single NIC kind which is broken */ |
847 | skb = taprio_dequeue_txq_priority(sch, entry, gate_mask); |
848 | } else if (static_branch_likely(&taprio_have_working_mqprio) && |
849 | !static_branch_unlikely(&taprio_have_broken_mqprio)) { |
850 | /* Single NIC kind which prioritizes properly */ |
851 | skb = taprio_dequeue_tc_priority(sch, entry, gate_mask); |
852 | } else { |
853 | /* Mixed NIC kinds present in system, need dynamic testing */ |
854 | if (q->broken_mqprio) |
855 | skb = taprio_dequeue_txq_priority(sch, entry, gate_mask); |
856 | else |
857 | skb = taprio_dequeue_tc_priority(sch, entry, gate_mask); |
858 | } |
859 | |
860 | done: |
861 | rcu_read_unlock(); |
862 | |
863 | return skb; |
864 | } |
865 | |
866 | static bool should_restart_cycle(const struct sched_gate_list *oper, |
867 | const struct sched_entry *entry) |
868 | { |
869 | if (list_is_last(&entry->list, &oper->entries)) |
870 | return true; |
871 | |
872 | if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0) |
873 | return true; |
874 | |
875 | return false; |
876 | } |
877 | |
878 | static bool should_change_schedules(const struct sched_gate_list *admin, |
879 | const struct sched_gate_list *oper, |
880 | ktime_t end_time) |
881 | { |
882 | ktime_t next_base_time, extension_time; |
883 | |
884 | if (!admin) |
885 | return false; |
886 | |
887 | next_base_time = sched_base_time(admin); |
888 | |
889 | /* This is the simple case, the end_time would fall after |
890 | * the next schedule base_time. |
891 | */ |
892 | if (ktime_compare(next_base_time, end_time) <= 0) |
893 | return true; |
894 | |
895 | /* This is the cycle_time_extension case, if the end_time |
896 | * plus the amount that can be extended would fall after the |
897 | * next schedule base_time, we can extend the current schedule |
898 | * for that amount. |
899 | */ |
900 | extension_time = ktime_add_ns(end_time, oper->cycle_time_extension); |
901 | |
902 | /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about |
903 | * how precisely the extension should be made. So after |
904 | * conformance testing, this logic may change. |
905 | */ |
906 | if (ktime_compare(next_base_time, extension_time) <= 0) |
907 | return true; |
908 | |
909 | return false; |
910 | } |
911 | |
912 | static enum hrtimer_restart advance_sched(struct hrtimer *timer) |
913 | { |
914 | struct taprio_sched *q = container_of(timer, struct taprio_sched, |
915 | advance_timer); |
916 | struct net_device *dev = qdisc_dev(q->root); |
917 | struct sched_gate_list *oper, *admin; |
918 | int num_tc = netdev_get_num_tc(dev); |
919 | struct sched_entry *entry, *next; |
920 | struct Qdisc *sch = q->root; |
921 | ktime_t end_time; |
922 | int tc; |
923 | |
924 | spin_lock(&q->current_entry_lock); |
925 | entry = rcu_dereference_protected(q->current_entry, |
926 | lockdep_is_held(&q->current_entry_lock)); |
927 | oper = rcu_dereference_protected(q->oper_sched, |
928 | lockdep_is_held(&q->current_entry_lock)); |
929 | admin = rcu_dereference_protected(q->admin_sched, |
930 | lockdep_is_held(&q->current_entry_lock)); |
931 | |
932 | if (!oper) |
933 | switch_schedules(q, &admin, &oper); |
934 | |
935 | /* This can happen in two cases: 1. this is the very first run |
936 | * of this function (i.e. we weren't running any schedule |
937 | * previously); 2. The previous schedule just ended. The first |
938 | * entry of all schedules are pre-calculated during the |
939 | * schedule initialization. |
940 | */ |
941 | if (unlikely(!entry || entry->end_time == oper->base_time)) { |
942 | next = list_first_entry(&oper->entries, struct sched_entry, |
943 | list); |
944 | end_time = next->end_time; |
945 | goto first_run; |
946 | } |
947 | |
948 | if (should_restart_cycle(oper, entry)) { |
949 | next = list_first_entry(&oper->entries, struct sched_entry, |
950 | list); |
951 | oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time, |
952 | oper->cycle_time); |
953 | } else { |
954 | next = list_next_entry(entry, list); |
955 | } |
956 | |
957 | end_time = ktime_add_ns(entry->end_time, next->interval); |
958 | end_time = min_t(ktime_t, end_time, oper->cycle_end_time); |
959 | |
960 | for (tc = 0; tc < num_tc; tc++) { |
961 | if (next->gate_duration[tc] == oper->cycle_time) |
962 | next->gate_close_time[tc] = KTIME_MAX; |
963 | else |
964 | next->gate_close_time[tc] = ktime_add_ns(entry->end_time, |
965 | next->gate_duration[tc]); |
966 | } |
967 | |
968 | if (should_change_schedules(admin, oper, end_time)) { |
969 | /* Set things so the next time this runs, the new |
970 | * schedule runs. |
971 | */ |
972 | end_time = sched_base_time(admin); |
973 | switch_schedules(q, &admin, &oper); |
974 | } |
975 | |
976 | next->end_time = end_time; |
977 | taprio_set_budgets(q, oper, next); |
978 | |
979 | first_run: |
980 | rcu_assign_pointer(q->current_entry, next); |
981 | spin_unlock(&q->current_entry_lock); |
982 | |
983 | hrtimer_set_expires(&q->advance_timer, end_time); |
984 | |
985 | rcu_read_lock(); |
986 | __netif_schedule(sch); |
987 | rcu_read_unlock(); |
988 | |
989 | return HRTIMER_RESTART; |
990 | } |
991 | |
992 | static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { |
993 | [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 }, |
994 | [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 }, |
995 | [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 }, |
996 | [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, |
997 | }; |
998 | |
999 | static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { |
1000 | [TCA_TAPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32, |
1001 | TC_QOPT_MAX_QUEUE), |
1002 | [TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 }, |
1003 | [TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32, |
1004 | TC_FP_EXPRESS, |
1005 | TC_FP_PREEMPTIBLE), |
1006 | }; |
1007 | |
1008 | static const struct netlink_range_validation_signed taprio_cycle_time_range = { |
1009 | .min = 0, |
1010 | .max = INT_MAX, |
1011 | }; |
1012 | |
1013 | static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { |
1014 | [TCA_TAPRIO_ATTR_PRIOMAP] = { |
1015 | .len = sizeof(struct tc_mqprio_qopt) |
1016 | }, |
1017 | [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED }, |
1018 | [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, |
1019 | [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, |
1020 | [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, |
1021 | [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = |
1022 | NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range), |
1023 | [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, |
1024 | [TCA_TAPRIO_ATTR_FLAGS] = |
1025 | NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS), |
1026 | [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, |
1027 | [TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED }, |
1028 | }; |
1029 | |
1030 | static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb, |
1031 | struct sched_entry *entry, |
1032 | struct netlink_ext_ack *extack) |
1033 | { |
1034 | int min_duration = length_to_duration(q, ETH_ZLEN); |
1035 | u32 interval = 0; |
1036 | |
1037 | if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) |
1038 | entry->command = nla_get_u8( |
1039 | tb[TCA_TAPRIO_SCHED_ENTRY_CMD]); |
1040 | |
1041 | if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]) |
1042 | entry->gate_mask = nla_get_u32( |
1043 | tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]); |
1044 | |
1045 | if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]) |
1046 | interval = nla_get_u32( |
1047 | tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); |
1048 | |
1049 | /* The interval should allow at least the minimum ethernet |
1050 | * frame to go out. |
1051 | */ |
1052 | if (interval < min_duration) { |
1053 | NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); |
1054 | return -EINVAL; |
1055 | } |
1056 | |
1057 | entry->interval = interval; |
1058 | |
1059 | return 0; |
1060 | } |
1061 | |
1062 | static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n, |
1063 | struct sched_entry *entry, int index, |
1064 | struct netlink_ext_ack *extack) |
1065 | { |
1066 | struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; |
1067 | int err; |
1068 | |
1069 | err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n, |
1070 | entry_policy, NULL); |
1071 | if (err < 0) { |
1072 | NL_SET_ERR_MSG(extack, "Could not parse nested entry"); |
1073 | return -EINVAL; |
1074 | } |
1075 | |
1076 | entry->index = index; |
1077 | |
1078 | return fill_sched_entry(q, tb, entry, extack); |
1079 | } |
1080 | |
1081 | static int parse_sched_list(struct taprio_sched *q, struct nlattr *list, |
1082 | struct sched_gate_list *sched, |
1083 | struct netlink_ext_ack *extack) |
1084 | { |
1085 | struct nlattr *n; |
1086 | int err, rem; |
1087 | int i = 0; |
1088 | |
1089 | if (!list) |
1090 | return -EINVAL; |
1091 | |
1092 | nla_for_each_nested(n, list, rem) { |
1093 | struct sched_entry *entry; |
1094 | |
1095 | if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) { |
1096 | NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'"); |
1097 | continue; |
1098 | } |
1099 | |
1100 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
1101 | if (!entry) { |
1102 | NL_SET_ERR_MSG(extack, "Not enough memory for entry"); |
1103 | return -ENOMEM; |
1104 | } |
1105 | |
1106 | err = parse_sched_entry(q, n, entry, i, extack); |
1107 | if (err < 0) { |
1108 | kfree(entry); |
1109 | return err; |
1110 | } |
1111 | |
1112 | list_add_tail(&entry->list, &sched->entries); |
1113 | i++; |
1114 | } |
1115 | |
1116 | sched->num_entries = i; |
1117 | |
1118 | return i; |
1119 | } |
1120 | |
1121 | static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, |
1122 | struct sched_gate_list *new, |
1123 | struct netlink_ext_ack *extack) |
1124 | { |
1125 | int err = 0; |
1126 | |
1127 | if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) { |
1128 | NL_SET_ERR_MSG(extack, "Adding a single entry is not supported"); |
1129 | return -ENOTSUPP; |
1130 | } |
1131 | |
1132 | if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]) |
1133 | new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]); |
1134 | |
1135 | if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]) |
1136 | new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]); |
1137 | |
1138 | if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]) |
1139 | new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); |
1140 | |
1141 | if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) |
1142 | err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], |
1143 | new, extack); |
1144 | if (err < 0) |
1145 | return err; |
1146 | |
1147 | if (!new->cycle_time) { |
1148 | struct sched_entry *entry; |
1149 | ktime_t cycle = 0; |
1150 | |
1151 | list_for_each_entry(entry, &new->entries, list) |
1152 | cycle = ktime_add_ns(cycle, entry->interval); |
1153 | |
1154 | if (!cycle) { |
1155 | NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0"); |
1156 | return -EINVAL; |
1157 | } |
1158 | |
1159 | if (cycle < 0 || cycle > INT_MAX) { |
1160 | NL_SET_ERR_MSG(extack, "'cycle_time' is too big"); |
1161 | return -EINVAL; |
1162 | } |
1163 | |
1164 | new->cycle_time = cycle; |
1165 | } |
1166 | |
1167 | taprio_calculate_gate_durations(q, new); |
1168 | |
1169 | return 0; |
1170 | } |
1171 | |
1172 | static int taprio_parse_mqprio_opt(struct net_device *dev, |
1173 | struct tc_mqprio_qopt *qopt, |
1174 | struct netlink_ext_ack *extack, |
1175 | u32 taprio_flags) |
1176 | { |
1177 | bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags); |
1178 | |
1179 | if (!qopt && !dev->num_tc) { |
1180 | NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary"); |
1181 | return -EINVAL; |
1182 | } |
1183 | |
1184 | /* If num_tc is already set, it means that the user already |
1185 | * configured the mqprio part |
1186 | */ |
1187 | if (dev->num_tc) |
1188 | return 0; |
1189 | |
1190 | /* taprio imposes that traffic classes map 1:n to tx queues */ |
1191 | if (qopt->num_tc > dev->num_tx_queues) { |
1192 | NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues"); |
1193 | return -EINVAL; |
1194 | } |
1195 | |
1196 | /* For some reason, in txtime-assist mode, we allow TXQ ranges for |
1197 | * different TCs to overlap, and just validate the TXQ ranges. |
1198 | */ |
1199 | return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs, |
1200 | extack); |
1201 | } |
1202 | |
1203 | static int taprio_get_start_time(struct Qdisc *sch, |
1204 | struct sched_gate_list *sched, |
1205 | ktime_t *start) |
1206 | { |
1207 | struct taprio_sched *q = qdisc_priv(sch); |
1208 | ktime_t now, base, cycle; |
1209 | s64 n; |
1210 | |
1211 | base = sched_base_time(sched); |
1212 | now = taprio_get_time(q); |
1213 | |
1214 | if (ktime_after(base, now)) { |
1215 | *start = base; |
1216 | return 0; |
1217 | } |
1218 | |
1219 | cycle = sched->cycle_time; |
1220 | |
1221 | /* The qdisc is expected to have at least one sched_entry. Moreover, |
1222 | * any entry must have 'interval' > 0. Thus if the cycle time is zero, |
1223 | * something went really wrong. In that case, we should warn about this |
1224 | * inconsistent state and return error. |
1225 | */ |
1226 | if (WARN_ON(!cycle)) |
1227 | return -EFAULT; |
1228 | |
1229 | /* Schedule the start time for the beginning of the next |
1230 | * cycle. |
1231 | */ |
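/* Worked example: base = 0, cycle = 1 ms, now = 2.3 ms => n = 2,
 * *start = 3 ms.
 */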
1232 | n = div64_s64(ktime_sub_ns(now, base), cycle); |
1233 | *start = ktime_add_ns(base, (n + 1) * cycle); |
1234 | return 0; |
1235 | } |
1236 | |
1237 | static void setup_first_end_time(struct taprio_sched *q, |
1238 | struct sched_gate_list *sched, ktime_t base) |
1239 | { |
1240 | struct net_device *dev = qdisc_dev(q->root); |
1241 | int num_tc = netdev_get_num_tc(dev); |
1242 | struct sched_entry *first; |
1243 | ktime_t cycle; |
1244 | int tc; |
1245 | |
1246 | first = list_first_entry(&sched->entries, |
1247 | struct sched_entry, list); |
1248 | |
1249 | cycle = sched->cycle_time; |
1250 | |
1251 | /* FIXME: find a better place to do this */ |
1252 | sched->cycle_end_time = ktime_add_ns(base, cycle); |
1253 | |
1254 | first->end_time = ktime_add_ns(base, first->interval); |
1255 | taprio_set_budgets(q, sched, first); |
1256 | |
1257 | for (tc = 0; tc < num_tc; tc++) { |
1258 | if (first->gate_duration[tc] == sched->cycle_time) |
1259 | first->gate_close_time[tc] = KTIME_MAX; |
1260 | else |
1261 | first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]); |
1262 | } |
1263 | |
1264 | rcu_assign_pointer(q->current_entry, NULL); |
1265 | } |
1266 | |
1267 | static void taprio_start_sched(struct Qdisc *sch, |
1268 | ktime_t start, struct sched_gate_list *new) |
1269 | { |
1270 | struct taprio_sched *q = qdisc_priv(sch); |
1271 | ktime_t expires; |
1272 | |
1273 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) |
1274 | return; |
1275 | |
1276 | expires = hrtimer_get_expires(&q->advance_timer); |
1277 | if (expires == 0) |
1278 | expires = KTIME_MAX; |
1279 | |
1280 | /* If the new schedule starts before the next expiration, we |
1281 | * reprogram it to the earliest one, so we change the admin |
1282 | * schedule to the operational one at the right time. |
1283 | */ |
1284 | start = min_t(ktime_t, start, expires); |
1285 | |
1286 | hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS); |
1287 | } |
1288 | |
1289 | static void taprio_set_picos_per_byte(struct net_device *dev, |
1290 | struct taprio_sched *q) |
1291 | { |
1292 | struct ethtool_link_ksettings ecmd; |
1293 | int speed = SPEED_10; |
1294 | int picos_per_byte; |
1295 | int err; |
1296 | |
1297 | err = __ethtool_get_link_ksettings(dev, &ecmd); |
1298 | if (err < 0) |
1299 | goto skip; |
1300 | |
1301 | if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN) |
1302 | speed = ecmd.base.speed; |
1303 | |
1304 | skip: |
1305 | picos_per_byte = (USEC_PER_SEC * 8) / speed; |
1306 | |
1307 | atomic64_set(&q->picos_per_byte, picos_per_byte); |
1308 | netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", |
1309 | dev->name, (long long)atomic64_read(&q->picos_per_byte), |
1310 | ecmd.base.speed); |
1311 | } |
1312 | |
1313 | static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event, |
1314 | void *ptr) |
1315 | { |
1316 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1317 | struct sched_gate_list *oper, *admin; |
1318 | struct qdisc_size_table *stab; |
1319 | struct taprio_sched *q; |
1320 | |
1321 | ASSERT_RTNL(); |
1322 | |
1323 | if (event != NETDEV_UP && event != NETDEV_CHANGE) |
1324 | return NOTIFY_DONE; |
1325 | |
1326 | list_for_each_entry(q, &taprio_list, taprio_list) { |
1327 | if (dev != qdisc_dev(q->root)) |
1328 | continue; |
1329 | |
1330 | taprio_set_picos_per_byte(dev, q); |
1331 | |
1332 | stab = rtnl_dereference(q->root->stab); |
1333 | |
1334 | oper = rtnl_dereference(q->oper_sched); |
1335 | if (oper) |
1336 | taprio_update_queue_max_sdu(q, oper, stab); |
1337 | |
1338 | admin = rtnl_dereference(q->admin_sched); |
1339 | if (admin) |
1340 | taprio_update_queue_max_sdu(q, admin, stab); |
1341 | |
1342 | break; |
1343 | } |
1344 | |
1345 | return NOTIFY_DONE; |
1346 | } |
1347 | |
1348 | static void setup_txtime(struct taprio_sched *q, |
1349 | struct sched_gate_list *sched, ktime_t base) |
1350 | { |
1351 | struct sched_entry *entry; |
1352 | u64 interval = 0; |
1353 | |
1354 | list_for_each_entry(entry, &sched->entries, list) { |
1355 | entry->next_txtime = ktime_add_ns(base, interval); |
1356 | interval += entry->interval; |
1357 | } |
1358 | } |
1359 | |
1360 | static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries) |
1361 | { |
1362 | struct __tc_taprio_qopt_offload *__offload; |
1363 | |
1364 | __offload = kzalloc(struct_size(__offload, offload.entries, num_entries), |
1365 | GFP_KERNEL); |
1366 | if (!__offload) |
1367 | return NULL; |
1368 | |
1369 | refcount_set(&__offload->users, 1); |
1370 | |
1371 | return &__offload->offload; |
1372 | } |
1373 | |
1374 | struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload |
1375 | *offload) |
1376 | { |
1377 | struct __tc_taprio_qopt_offload *__offload; |
1378 | |
1379 | __offload = container_of(offload, struct __tc_taprio_qopt_offload, |
1380 | offload); |
1381 | |
1382 | refcount_inc(&__offload->users); |
1383 | |
1384 | return offload; |
1385 | } |
1386 | EXPORT_SYMBOL_GPL(taprio_offload_get); |
1387 | |
1388 | void taprio_offload_free(struct tc_taprio_qopt_offload *offload) |
1389 | { |
1390 | struct __tc_taprio_qopt_offload *__offload; |
1391 | |
1392 | __offload = container_of(offload, struct __tc_taprio_qopt_offload, |
1393 | offload); |
1394 | |
1395 | if (!refcount_dec_and_test(&__offload->users)) |
1396 | return; |
1397 | |
1398 | kfree(__offload); |
1399 | } |
1400 | EXPORT_SYMBOL_GPL(taprio_offload_free); |
1401 | |
1402 | /* The function will only serve to keep the pointers to the "oper" and "admin" |
1403 | * schedules valid in relation to their base times, so when calling dump() the |
1404 | * user looks at the right schedules. |
1405 | * When using full offload, the admin configuration is promoted to oper at the |
1406 | * base_time in the PHC time domain. But because the system time is not |
1407 | * necessarily in sync with that, we can't just trigger a hrtimer to call |
1408 | * switch_schedules at the right hardware time. |
1409 | * At the moment we call this by hand right away from taprio, but in the future |
1410 | * it will be useful to create a mechanism for drivers to notify taprio of the |
1411 | * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump(). |
1412 | * This is left as TODO. |
1413 | */ |
1414 | static void taprio_offload_config_changed(struct taprio_sched *q) |
1415 | { |
1416 | struct sched_gate_list *oper, *admin; |
1417 | |
1418 | oper = rtnl_dereference(q->oper_sched); |
1419 | admin = rtnl_dereference(q->admin_sched); |
1420 | |
1421 | switch_schedules(q, &admin, &oper); |
1422 | } |
1423 | |
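/* Translate a bitmask of traffic classes into a bitmask of TX queues using
 * the device's tc_to_txq map. Illustrative example: if TC 1 covers TXQs 2..3
 * (offset 2, count 2), then a tc_mask of BIT(1) yields a queue_mask of 0xc.
 */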
1424 | static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) |
1425 | { |
1426 | u32 i, queue_mask = 0; |
1427 | |
1428 | for (i = 0; i < dev->num_tc; i++) { |
1429 | u32 offset, count; |
1430 | |
1431 | if (!(tc_mask & BIT(i))) |
1432 | continue; |
1433 | |
1434 | offset = dev->tc_to_txq[i].offset; |
1435 | count = dev->tc_to_txq[i].count; |
1436 | |
1437 | queue_mask |= GENMASK(offset + count - 1, offset); |
1438 | } |
1439 | |
1440 | return queue_mask; |
1441 | } |
1442 | |
1443 | static void taprio_sched_to_offload(struct net_device *dev, |
1444 | struct sched_gate_list *sched, |
1445 | struct tc_taprio_qopt_offload *offload, |
1446 | const struct tc_taprio_caps *caps) |
1447 | { |
1448 | struct sched_entry *entry; |
1449 | int i = 0; |
1450 | |
1451 | offload->base_time = sched->base_time; |
1452 | offload->cycle_time = sched->cycle_time; |
1453 | offload->cycle_time_extension = sched->cycle_time_extension; |
1454 | |
1455 | list_for_each_entry(entry, &sched->entries, list) { |
1456 | struct tc_taprio_sched_entry *e = &offload->entries[i]; |
1457 | |
1458 | e->command = entry->command; |
1459 | e->interval = entry->interval; |
1460 | if (caps->gate_mask_per_txq) |
1461 | e->gate_mask = tc_map_to_queue_mask(dev, |
1462 | entry->gate_mask); |
1463 | else |
1464 | e->gate_mask = entry->gate_mask; |
1465 | |
1466 | i++; |
1467 | } |
1468 | |
1469 | offload->num_entries = i; |
1470 | } |
1471 | |
1472 | static void taprio_detect_broken_mqprio(struct taprio_sched *q) |
1473 | { |
1474 | struct net_device *dev = qdisc_dev(q->root); |
1475 | struct tc_taprio_caps caps; |
1476 | |
1477 | qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO, |
1478 | &caps, sizeof(caps)); |
1479 | |
1480 | q->broken_mqprio = caps.broken_mqprio; |
1481 | if (q->broken_mqprio) |
1482 | static_branch_inc(&taprio_have_broken_mqprio); |
1483 | else |
1484 | static_branch_inc(&taprio_have_working_mqprio); |
1485 | |
1486 | q->detected_mqprio = true; |
1487 | } |
1488 | |
1489 | static void taprio_cleanup_broken_mqprio(struct taprio_sched *q) |
1490 | { |
1491 | if (!q->detected_mqprio) |
1492 | return; |
1493 | |
1494 | if (q->broken_mqprio) |
1495 | static_branch_dec(&taprio_have_broken_mqprio); |
1496 | else |
1497 | static_branch_dec(&taprio_have_working_mqprio); |
1498 | } |
1499 | |
1500 | static int taprio_enable_offload(struct net_device *dev, |
1501 | struct taprio_sched *q, |
1502 | struct sched_gate_list *sched, |
1503 | struct netlink_ext_ack *extack) |
1504 | { |
1505 | const struct net_device_ops *ops = dev->netdev_ops; |
1506 | struct tc_taprio_qopt_offload *offload; |
1507 | struct tc_taprio_caps caps; |
1508 | int tc, err = 0; |
1509 | |
1510 | if (!ops->ndo_setup_tc) { |
1511 | NL_SET_ERR_MSG(extack, |
1512 | "Device does not support taprio offload" ); |
1513 | return -EOPNOTSUPP; |
1514 | } |
1515 | |
1516 | qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO, |
1517 | &caps, sizeof(caps)); |
1518 | |
1519 | if (!caps.supports_queue_max_sdu) { |
1520 | for (tc = 0; tc < TC_MAX_QUEUE; tc++) { |
1521 | if (q->max_sdu[tc]) { |
1522 | NL_SET_ERR_MSG_MOD(extack, |
1523 | "Device does not handle queueMaxSDU" ); |
1524 | return -EOPNOTSUPP; |
1525 | } |
1526 | } |
1527 | } |
1528 | |
1529 | offload = taprio_offload_alloc(sched->num_entries); |
1530 | if (!offload) { |
1531 | NL_SET_ERR_MSG(extack, |
1532 | "Not enough memory for enabling offload mode" ); |
1533 | return -ENOMEM; |
1534 | } |
1535 | offload->cmd = TAPRIO_CMD_REPLACE; |
1536 | offload->extack = extack; |
1537 | mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt); |
1538 | offload->mqprio.extack = extack; |
1539 | taprio_sched_to_offload(dev, sched, offload, &caps); |
1540 | mqprio_fp_to_offload(q->fp, &offload->mqprio); |
1541 | |
1542 | for (tc = 0; tc < TC_MAX_QUEUE; tc++) |
1543 | offload->max_sdu[tc] = q->max_sdu[tc]; |
1544 | |
1545 | err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); |
1546 | if (err < 0) { |
1547 | NL_SET_ERR_MSG_WEAK(extack, |
1548 | "Device failed to setup taprio offload" ); |
1549 | goto done; |
1550 | } |
1551 | |
1552 | q->offloaded = true; |
1553 | |
1554 | done: |
1555 | /* The offload structure may linger around via a reference taken by the |
1556 | * device driver, so clear up the netlink extack pointer so that the |
1557 | * driver isn't tempted to dereference data which stopped being valid |
1558 | */ |
1559 | offload->extack = NULL; |
1560 | offload->mqprio.extack = NULL; |
1561 | taprio_offload_free(offload); |
1562 | |
1563 | return err; |
1564 | } |
1565 | |
1566 | static int taprio_disable_offload(struct net_device *dev, |
1567 | struct taprio_sched *q, |
1568 | struct netlink_ext_ack *extack) |
1569 | { |
1570 | const struct net_device_ops *ops = dev->netdev_ops; |
1571 | struct tc_taprio_qopt_offload *offload; |
1572 | int err; |
1573 | |
1574 | if (!q->offloaded) |
1575 | return 0; |
1576 | |
1577 | offload = taprio_offload_alloc(0); |
1578 | if (!offload) { |
1579 | NL_SET_ERR_MSG(extack, |
1580 | "Not enough memory to disable offload mode" ); |
1581 | return -ENOMEM; |
1582 | } |
1583 | offload->cmd = TAPRIO_CMD_DESTROY; |
1584 | |
1585 | err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); |
1586 | if (err < 0) { |
1587 | NL_SET_ERR_MSG(extack, |
1588 | "Device failed to disable offload" ); |
1589 | goto out; |
1590 | } |
1591 | |
1592 | q->offloaded = false; |
1593 | |
1594 | out: |
1595 | taprio_offload_free(offload); |
1596 | |
1597 | return err; |
1598 | } |
1599 | |
1600 | /* If full offload is enabled, the only possible clockid is the net device's |
1601 | * PHC. For that reason, specifying a clockid through netlink is incorrect. |
1602 | * For txtime-assist, it is implicitly assumed that the device's PHC is kept |
1603 | * in sync with the specified clockid via a user space daemon such as phc2sys. |
1604 | * For both software taprio and txtime-assist, the clockid is used for the |
1605 | * hrtimer that advances the schedule, and is hence mandatory. |
1606 | */ |
1607 | static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb, |
1608 | struct netlink_ext_ack *extack) |
1609 | { |
1610 | struct taprio_sched *q = qdisc_priv(sch); |
1611 | struct net_device *dev = qdisc_dev(sch); |
1612 | int err = -EINVAL; |
1613 | |
1614 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { |
1615 | const struct ethtool_ops *ops = dev->ethtool_ops; |
1616 | struct ethtool_ts_info info = { |
1617 | .cmd = ETHTOOL_GET_TS_INFO, |
1618 | .phc_index = -1, |
1619 | }; |
1620 | |
1621 | if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { |
1622 | NL_SET_ERR_MSG(extack, |
1623 | "The 'clockid' cannot be specified for full offload" ); |
1624 | goto out; |
1625 | } |
1626 | |
1627 | if (ops && ops->get_ts_info) |
1628 | err = ops->get_ts_info(dev, &info); |
1629 | |
1630 | if (err || info.phc_index < 0) { |
1631 | NL_SET_ERR_MSG(extack, |
1632 | "Device does not have a PTP clock" ); |
1633 | err = -ENOTSUPP; |
1634 | goto out; |
1635 | } |
1636 | } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { |
1637 | int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]); |
1638 | enum tk_offsets tk_offset; |
1639 | |
1640 | /* We only support static clockids and we don't allow |
1641 | * the clockid to be modified after the first init. |
1642 | */ |
1643 | if (clockid < 0 || |
1644 | (q->clockid != -1 && q->clockid != clockid)) { |
1645 | NL_SET_ERR_MSG(extack, |
1646 | "Changing the 'clockid' of a running schedule is not supported" ); |
1647 | err = -ENOTSUPP; |
1648 | goto out; |
1649 | } |
1650 | |
1651 | switch (clockid) { |
1652 | case CLOCK_REALTIME: |
1653 | tk_offset = TK_OFFS_REAL; |
1654 | break; |
1655 | case CLOCK_MONOTONIC: |
1656 | tk_offset = TK_OFFS_MAX; |
1657 | break; |
1658 | case CLOCK_BOOTTIME: |
1659 | tk_offset = TK_OFFS_BOOT; |
1660 | break; |
1661 | case CLOCK_TAI: |
1662 | tk_offset = TK_OFFS_TAI; |
1663 | break; |
1664 | default: |
1665 | NL_SET_ERR_MSG(extack, "Invalid 'clockid'" ); |
1666 | err = -EINVAL; |
1667 | goto out; |
1668 | } |
1669 | /* This pairs with READ_ONCE() in taprio_mono_to_any */ |
1670 | WRITE_ONCE(q->tk_offset, tk_offset); |
1671 | |
1672 | q->clockid = clockid; |
1673 | } else { |
1674 | NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory" ); |
1675 | goto out; |
1676 | } |
1677 | |
1678 | /* Everything went ok, return success. */ |
1679 | err = 0; |
1680 | |
1681 | out: |
1682 | return err; |
1683 | } |
1684 | |
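/* Parse a single TCA_TAPRIO_ATTR_TC_ENTRY nest. The traffic class index is
 * mandatory; the max SDU and frame preemption (express/preemptible)
 * attributes only overwrite the corresponding per-TC slot when present.
 */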
1685 | static int taprio_parse_tc_entry(struct Qdisc *sch, |
1686 | struct nlattr *opt, |
1687 | u32 max_sdu[TC_QOPT_MAX_QUEUE], |
1688 | u32 fp[TC_QOPT_MAX_QUEUE], |
1689 | unsigned long *seen_tcs, |
1690 | struct netlink_ext_ack *extack) |
1691 | { |
1692 | struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { }; |
1693 | struct net_device *dev = qdisc_dev(sch); |
1694 | int err, tc; |
1695 | u32 val; |
1696 | |
1697 | err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt, |
1698 | taprio_tc_policy, extack); |
1699 | if (err < 0) |
1700 | return err; |
1701 | |
1702 | if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) { |
1703 | NL_SET_ERR_MSG_MOD(extack, "TC entry index missing"); |
1704 | return -EINVAL; |
1705 | } |
1706 | |
1707 | tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]); |
1708 | if (tc >= TC_QOPT_MAX_QUEUE) { |
1709 | NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range"); |
1710 | return -ERANGE; |
1711 | } |
1712 | |
1713 | if (*seen_tcs & BIT(tc)) { |
1714 | NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry" ); |
1715 | return -EINVAL; |
1716 | } |
1717 | |
1718 | *seen_tcs |= BIT(tc); |
1719 | |
1720 | if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) { |
1721 | val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]); |
1722 | if (val > dev->max_mtu) { |
1723 | NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU"); |
1724 | return -ERANGE; |
1725 | } |
1726 | |
1727 | max_sdu[tc] = val; |
1728 | } |
1729 | |
1730 | if (tb[TCA_TAPRIO_TC_ENTRY_FP]) |
1731 | fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]); |
1732 | |
1733 | return 0; |
1734 | } |
1735 | |
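/* Walk all TCA_TAPRIO_ATTR_TC_ENTRY nests in the request. Values are staged
 * on the stack and only copied into the qdisc once every entry has parsed
 * successfully. Frame preemption (any TC not marked as express) is only
 * accepted in full offload mode on devices that support the MAC Merge layer.
 */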
1736 | static int taprio_parse_tc_entries(struct Qdisc *sch, |
1737 | struct nlattr *opt, |
1738 | struct netlink_ext_ack *extack) |
1739 | { |
1740 | struct taprio_sched *q = qdisc_priv(sch); |
1741 | struct net_device *dev = qdisc_dev(sch); |
1742 | u32 max_sdu[TC_QOPT_MAX_QUEUE]; |
1743 | bool have_preemption = false; |
1744 | unsigned long seen_tcs = 0; |
1745 | u32 fp[TC_QOPT_MAX_QUEUE]; |
1746 | struct nlattr *n; |
1747 | int tc, rem; |
1748 | int err = 0; |
1749 | |
1750 | for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { |
1751 | max_sdu[tc] = q->max_sdu[tc]; |
1752 | fp[tc] = q->fp[tc]; |
1753 | } |
1754 | |
1755 | nla_for_each_nested(n, opt, rem) { |
1756 | if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY) |
1757 | continue; |
1758 | |
1759 | err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs, |
1760 | extack); |
1761 | if (err) |
1762 | return err; |
1763 | } |
1764 | |
1765 | for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { |
1766 | q->max_sdu[tc] = max_sdu[tc]; |
1767 | q->fp[tc] = fp[tc]; |
1768 | if (fp[tc] != TC_FP_EXPRESS) |
1769 | have_preemption = true; |
1770 | } |
1771 | |
1772 | if (have_preemption) { |
1773 | if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) { |
1774 | NL_SET_ERR_MSG(extack, |
1775 | "Preemption only supported with full offload" ); |
1776 | return -EOPNOTSUPP; |
1777 | } |
1778 | |
1779 | if (!ethtool_dev_mm_supported(dev)) { |
1780 | NL_SET_ERR_MSG(extack, |
1781 | "Device does not support preemption" ); |
1782 | return -EOPNOTSUPP; |
1783 | } |
1784 | } |
1785 | |
1786 | return err; |
1787 | } |
1788 | |
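/* Returns 0 if @mqprio matches the traffic class configuration currently
 * programmed into @dev (number of TCs, per-TC queue ranges and the
 * priority to TC map), and -1 on any mismatch or if @mqprio is NULL.
 */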
1789 | static int taprio_mqprio_cmp(const struct net_device *dev, |
1790 | const struct tc_mqprio_qopt *mqprio) |
1791 | { |
1792 | int i; |
1793 | |
1794 | if (!mqprio || mqprio->num_tc != dev->num_tc) |
1795 | return -1; |
1796 | |
1797 | for (i = 0; i < mqprio->num_tc; i++) |
1798 | if (dev->tc_to_txq[i].count != mqprio->count[i] || |
1799 | dev->tc_to_txq[i].offset != mqprio->offset[i]) |
1800 | return -1; |
1801 | |
1802 | for (i = 0; i <= TC_BITMASK; i++) |
1803 | if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i]) |
1804 | return -1; |
1805 | |
1806 | return 0; |
1807 | } |
1808 | |
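/* Handles both the initial configuration from taprio_init() and later
 * "tc qdisc change" requests: parse and validate a new schedule, enable or
 * disable the offload as dictated by the flags, and install the result as
 * the administrative schedule that takes effect at its base time.
 */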
1809 | static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
1810 | struct netlink_ext_ack *extack) |
1811 | { |
1812 | struct qdisc_size_table *stab = rtnl_dereference(sch->stab); |
1813 | struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { }; |
1814 | struct sched_gate_list *oper, *admin, *new_admin; |
1815 | struct taprio_sched *q = qdisc_priv(sch); |
1816 | struct net_device *dev = qdisc_dev(sch); |
1817 | struct tc_mqprio_qopt *mqprio = NULL; |
1818 | unsigned long flags; |
1819 | u32 taprio_flags; |
1820 | ktime_t start; |
1821 | int i, err; |
1822 | |
1823 | err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, |
1824 | taprio_policy, extack); |
1825 | if (err < 0) |
1826 | return err; |
1827 | |
1828 | if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) |
1829 | mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); |
1830 | |
1831 | /* The semantics of the 'flags' argument in relation to 'change()' |
1832 | * requests are interpreted following two rules (which are applied in |
1833 | * this order): (1) an omitted 'flags' argument is interpreted as |
1834 | * zero; (2) the 'flags' of a "running" taprio instance cannot be |
1835 | * changed. |
1836 | */ |
1837 | taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0; |
1838 | |
1839 | /* txtime-assist and full offload are mutually exclusive */ |
1840 | if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) && |
1841 | (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) { |
1842 | NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS], |
1843 | "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive" ); |
1844 | return -EINVAL; |
1845 | } |
1846 | |
1847 | if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) { |
1848 | NL_SET_ERR_MSG_MOD(extack, |
1849 | "Changing 'flags' of a running schedule is not supported" ); |
1850 | return -EOPNOTSUPP; |
1851 | } |
1852 | q->flags = taprio_flags; |
1853 | |
1854 | err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); |
1855 | if (err < 0) |
1856 | return err; |
1857 | |
1858 | err = taprio_parse_tc_entries(sch, opt, extack); |
1859 | if (err) |
1860 | return err; |
1861 | |
1862 | new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL); |
1863 | if (!new_admin) { |
1864 | NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule"); |
1865 | return -ENOMEM; |
1866 | } |
1867 | INIT_LIST_HEAD(&new_admin->entries); |
1868 | |
1869 | oper = rtnl_dereference(q->oper_sched); |
1870 | admin = rtnl_dereference(q->admin_sched); |
1871 | |
1872 | /* no changes - no new mqprio settings */ |
1873 | if (!taprio_mqprio_cmp(dev, mqprio)) |
1874 | mqprio = NULL; |
1875 | |
1876 | if (mqprio && (oper || admin)) { |
1877 | NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported" ); |
1878 | err = -ENOTSUPP; |
1879 | goto free_sched; |
1880 | } |
1881 | |
1882 | if (mqprio) { |
1883 | err = netdev_set_num_tc(dev, mqprio->num_tc); |
1884 | if (err) |
1885 | goto free_sched; |
1886 | for (i = 0; i < mqprio->num_tc; i++) { |
1887 | netdev_set_tc_queue(dev, i, |
1888 | mqprio->count[i], |
1889 | mqprio->offset[i]); |
1890 | q->cur_txq[i] = mqprio->offset[i]; |
1891 | } |
1892 | |
1893 | /* Always use supplied priority mappings */ |
1894 | for (i = 0; i <= TC_BITMASK; i++) |
1895 | netdev_set_prio_tc_map(dev, i, |
1896 | mqprio->prio_tc_map[i]); |
1897 | } |
1898 | |
1899 | err = parse_taprio_schedule(q, tb, new_admin, extack); |
1900 | if (err < 0) |
1901 | goto free_sched; |
1902 | |
1903 | if (new_admin->num_entries == 0) { |
1904 | NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule"); |
1905 | err = -EINVAL; |
1906 | goto free_sched; |
1907 | } |
1908 | |
1909 | err = taprio_parse_clockid(sch, tb, extack); |
1910 | if (err < 0) |
1911 | goto free_sched; |
1912 | |
1913 | taprio_set_picos_per_byte(dev, q); |
1914 | taprio_update_queue_max_sdu(q, new_admin, stab); |
1915 | |
1916 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) |
1917 | err = taprio_enable_offload(dev, q, new_admin, extack); |
1918 | else |
1919 | err = taprio_disable_offload(dev, q, extack); |
1920 | if (err) |
1921 | goto free_sched; |
1922 | |
1923 | /* Protects against enqueue()/dequeue() */ |
1924 | spin_lock_bh(qdisc_lock(sch)); |
1925 | |
1926 | if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) { |
1927 | if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) { |
1928 | NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled"); |
1929 | err = -EINVAL; |
1930 | goto unlock; |
1931 | } |
1932 | |
1933 | q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); |
1934 | } |
1935 | |
1936 | if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && |
1937 | !FULL_OFFLOAD_IS_ENABLED(q->flags) && |
1938 | !hrtimer_active(&q->advance_timer)) { |
1939 | hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); |
1940 | q->advance_timer.function = advance_sched; |
1941 | } |
1942 | |
1943 | err = taprio_get_start_time(sch, new_admin, &start); |
1944 | if (err < 0) { |
1945 | NL_SET_ERR_MSG(extack, "Internal error: failed get start time" ); |
1946 | goto unlock; |
1947 | } |
1948 | |
1949 | setup_txtime(q, new_admin, start); |
1950 | |
1951 | if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { |
1952 | if (!oper) { |
1953 | rcu_assign_pointer(q->oper_sched, new_admin); |
1954 | err = 0; |
1955 | new_admin = NULL; |
1956 | goto unlock; |
1957 | } |
1958 | |
1959 | rcu_assign_pointer(q->admin_sched, new_admin); |
1960 | if (admin) |
1961 | call_rcu(head: &admin->rcu, func: taprio_free_sched_cb); |
1962 | } else { |
1963 | setup_first_end_time(q, sched: new_admin, base: start); |
1964 | |
1965 | /* Protects against advance_sched() */ |
1966 | spin_lock_irqsave(&q->current_entry_lock, flags); |
1967 | |
1968 | taprio_start_sched(sch, start, new: new_admin); |
1969 | |
1970 | rcu_assign_pointer(q->admin_sched, new_admin); |
1971 | if (admin) |
1972 | call_rcu(head: &admin->rcu, func: taprio_free_sched_cb); |
1973 | |
1974 | spin_unlock_irqrestore(lock: &q->current_entry_lock, flags); |
1975 | |
1976 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) |
1977 | taprio_offload_config_changed(q); |
1978 | } |
1979 | |
1980 | new_admin = NULL; |
1981 | err = 0; |
1982 | |
1983 | if (!stab) |
1984 | NL_SET_ERR_MSG_MOD(extack, |
1985 | "Size table not specified, frame length estimations may be inaccurate" ); |
1986 | |
1987 | unlock: |
1988 | spin_unlock_bh(lock: qdisc_lock(qdisc: sch)); |
1989 | |
1990 | free_sched: |
1991 | if (new_admin) |
1992 | call_rcu(head: &new_admin->rcu, func: taprio_free_sched_cb); |
1993 | |
1994 | return err; |
1995 | } |
1996 | |
1997 | static void taprio_reset(struct Qdisc *sch) |
1998 | { |
1999 | struct taprio_sched *q = qdisc_priv(sch); |
2000 | struct net_device *dev = qdisc_dev(sch); |
2001 | int i; |
2002 | |
2003 | hrtimer_cancel(&q->advance_timer); |
2004 | |
2005 | if (q->qdiscs) { |
2006 | for (i = 0; i < dev->num_tx_queues; i++) |
2007 | if (q->qdiscs[i]) |
2008 | qdisc_reset(q->qdiscs[i]); |
2009 | } |
2010 | } |
2011 | |
2012 | static void taprio_destroy(struct Qdisc *sch) |
2013 | { |
2014 | struct taprio_sched *q = qdisc_priv(sch); |
2015 | struct net_device *dev = qdisc_dev(sch); |
2016 | struct sched_gate_list *oper, *admin; |
2017 | unsigned int i; |
2018 | |
2019 | list_del(&q->taprio_list); |
2020 | |
2021 | /* Note that taprio_reset() might not be called if an error |
2022 | * happens in qdisc_create(), after taprio_init() has been called. |
2023 | */ |
2024 | hrtimer_cancel(&q->advance_timer); |
2025 | qdisc_synchronize(sch); |
2026 | |
2027 | taprio_disable_offload(dev, q, NULL); |
2028 | |
2029 | if (q->qdiscs) { |
2030 | for (i = 0; i < dev->num_tx_queues; i++) |
2031 | qdisc_put(q->qdiscs[i]); |
2032 | |
2033 | kfree(q->qdiscs); |
2034 | } |
2035 | q->qdiscs = NULL; |
2036 | |
2037 | netdev_reset_tc(dev); |
2038 | |
2039 | oper = rtnl_dereference(q->oper_sched); |
2040 | admin = rtnl_dereference(q->admin_sched); |
2041 | |
2042 | if (oper) |
2043 | call_rcu(head: &oper->rcu, func: taprio_free_sched_cb); |
2044 | |
2045 | if (admin) |
2046 | call_rcu(head: &admin->rcu, func: taprio_free_sched_cb); |
2047 | |
2048 | taprio_cleanup_broken_mqprio(q); |
2049 | } |
2050 | |
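/* Creates one pfifo child qdisc per netdev TX queue and defers the actual
 * schedule parsing to taprio_change(). taprio can only be attached as the
 * root qdisc of a multiqueue device.
 */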
2051 | static int taprio_init(struct Qdisc *sch, struct nlattr *opt, |
2052 | struct netlink_ext_ack *extack) |
2053 | { |
2054 | struct taprio_sched *q = qdisc_priv(sch); |
2055 | struct net_device *dev = qdisc_dev(sch); |
2056 | int i, tc; |
2057 | |
2058 | spin_lock_init(&q->current_entry_lock); |
2059 | |
2060 | hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); |
2061 | q->advance_timer.function = advance_sched; |
2062 | |
2063 | q->root = sch; |
2064 | |
2065 | /* We only support static clockids. Use an invalid value as default |
2066 | * and get the valid one on taprio_change(). |
2067 | */ |
2068 | q->clockid = -1; |
2069 | q->flags = TAPRIO_FLAGS_INVALID; |
2070 | |
2071 | list_add(&q->taprio_list, &taprio_list); |
2072 | |
2073 | if (sch->parent != TC_H_ROOT) { |
2074 | NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc"); |
2075 | return -EOPNOTSUPP; |
2076 | } |
2077 | |
2078 | if (!netif_is_multiqueue(dev)) { |
2079 | NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required"); |
2080 | return -EOPNOTSUPP; |
2081 | } |
2082 | |
2083 | q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]), |
2084 | GFP_KERNEL); |
2085 | if (!q->qdiscs) |
2086 | return -ENOMEM; |
2087 | |
2088 | if (!opt) |
2089 | return -EINVAL; |
2090 | |
2091 | for (i = 0; i < dev->num_tx_queues; i++) { |
2092 | struct netdev_queue *dev_queue; |
2093 | struct Qdisc *qdisc; |
2094 | |
2095 | dev_queue = netdev_get_tx_queue(dev, i); |
2096 | qdisc = qdisc_create_dflt(dev_queue, |
2097 | &pfifo_qdisc_ops, |
2098 | TC_H_MAKE(TC_H_MAJ(sch->handle), |
2099 | TC_H_MIN(i + 1)), |
2100 | extack); |
2101 | if (!qdisc) |
2102 | return -ENOMEM; |
2103 | |
2104 | if (i < dev->real_num_tx_queues) |
2105 | qdisc_hash_add(qdisc, false); |
2106 | |
2107 | q->qdiscs[i] = qdisc; |
2108 | } |
2109 | |
2110 | for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) |
2111 | q->fp[tc] = TC_FP_EXPRESS; |
2112 | |
2113 | taprio_detect_broken_mqprio(q); |
2114 | |
2115 | return taprio_change(sch, opt, extack); |
2116 | } |
2117 | |
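/* Invoked once the qdisc is fully set up. In full offload mode the per-queue
 * children are grafted directly onto the netdev TX queues; in software mode
 * the taprio root itself is attached to every TX queue so that all enqueues
 * funnel through taprio_enqueue().
 */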
2118 | static void taprio_attach(struct Qdisc *sch) |
2119 | { |
2120 | struct taprio_sched *q = qdisc_priv(sch); |
2121 | struct net_device *dev = qdisc_dev(sch); |
2122 | unsigned int ntx; |
2123 | |
2124 | /* Attach underlying qdisc */ |
2125 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { |
2126 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx); |
2127 | struct Qdisc *old, *dev_queue_qdisc; |
2128 | |
2129 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { |
2130 | struct Qdisc *qdisc = q->qdiscs[ntx]; |
2131 | |
2132 | /* In offload mode, the root taprio qdisc is bypassed |
2133 | * and the netdev TX queues see the children directly |
2134 | */ |
2135 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
2136 | dev_queue_qdisc = qdisc; |
2137 | } else { |
2138 | /* In software mode, attach the root taprio qdisc |
2139 | * to all netdev TX queues, so that dev_qdisc_enqueue() |
2140 | * goes through taprio_enqueue(). |
2141 | */ |
2142 | dev_queue_qdisc = sch; |
2143 | } |
2144 | old = dev_graft_qdisc(dev_queue, dev_queue_qdisc); |
2145 | /* The qdisc's refcount needs to be elevated once |
2146 | * for each netdev TX queue it is grafted onto |
2147 | */ |
2148 | qdisc_refcount_inc(dev_queue_qdisc); |
2149 | if (old) |
2150 | qdisc_put(old); |
2151 | } |
2152 | } |
2153 | |
2154 | static struct netdev_queue *taprio_queue_get(struct Qdisc *sch, |
2155 | unsigned long cl) |
2156 | { |
2157 | struct net_device *dev = qdisc_dev(sch); |
2158 | unsigned long ntx = cl - 1; |
2159 | |
2160 | if (ntx >= dev->num_tx_queues) |
2161 | return NULL; |
2162 | |
2163 | return netdev_get_tx_queue(dev, ntx); |
2164 | } |
2165 | |
2166 | static int taprio_graft(struct Qdisc *sch, unsigned long cl, |
2167 | struct Qdisc *new, struct Qdisc **old, |
2168 | struct netlink_ext_ack *extack) |
2169 | { |
2170 | struct taprio_sched *q = qdisc_priv(sch); |
2171 | struct net_device *dev = qdisc_dev(sch); |
2172 | struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); |
2173 | |
2174 | if (!dev_queue) |
2175 | return -EINVAL; |
2176 | |
2177 | if (dev->flags & IFF_UP) |
2178 | dev_deactivate(dev); |
2179 | |
2180 | /* In offload mode, the child Qdisc is directly attached to the netdev |
2181 | * TX queue, and thus, we need to keep its refcount elevated in order |
2182 | * to counteract qdisc_graft()'s call to qdisc_put() once per TX queue. |
2183 | * However, save the reference to the new qdisc in the private array in |
2184 | * both software and offload cases, to have an up-to-date reference to |
2185 | * our children. |
2186 | */ |
2187 | *old = q->qdiscs[cl - 1]; |
2188 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { |
2189 | WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old); |
2190 | if (new) |
2191 | qdisc_refcount_inc(new); |
2192 | if (*old) |
2193 | qdisc_put(*old); |
2194 | } |
2195 | |
2196 | q->qdiscs[cl - 1] = new; |
2197 | if (new) |
2198 | new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
2199 | |
2200 | if (dev->flags & IFF_UP) |
2201 | dev_activate(dev); |
2202 | |
2203 | return 0; |
2204 | } |
2205 | |
2206 | static int dump_entry(struct sk_buff *msg, |
2207 | const struct sched_entry *entry) |
2208 | { |
2209 | struct nlattr *item; |
2210 | |
2211 | item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY); |
2212 | if (!item) |
2213 | return -ENOSPC; |
2214 | |
2215 | if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index)) |
2216 | goto nla_put_failure; |
2217 | |
2218 | if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command)) |
2219 | goto nla_put_failure; |
2220 | |
2221 | if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, |
2222 | entry->gate_mask)) |
2223 | goto nla_put_failure; |
2224 | |
2225 | if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, |
2226 | entry->interval)) |
2227 | goto nla_put_failure; |
2228 | |
2229 | return nla_nest_end(msg, item); |
2230 | |
2231 | nla_put_failure: |
2232 | nla_nest_cancel(msg, item); |
2233 | return -1; |
2234 | } |
2235 | |
2236 | static int dump_schedule(struct sk_buff *msg, |
2237 | const struct sched_gate_list *root) |
2238 | { |
2239 | struct nlattr *entry_list; |
2240 | struct sched_entry *entry; |
2241 | |
2242 | if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME, |
2243 | root->base_time, TCA_TAPRIO_PAD)) |
2244 | return -1; |
2245 | |
2246 | if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, |
2247 | root->cycle_time, TCA_TAPRIO_PAD)) |
2248 | return -1; |
2249 | |
2250 | if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, |
2251 | root->cycle_time_extension, TCA_TAPRIO_PAD)) |
2252 | return -1; |
2253 | |
2254 | entry_list = nla_nest_start_noflag(msg, |
2255 | TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST); |
2256 | if (!entry_list) |
2257 | goto error_nest; |
2258 | |
2259 | list_for_each_entry(entry, &root->entries, list) { |
2260 | if (dump_entry(msg, entry) < 0) |
2261 | goto error_nest; |
2262 | } |
2263 | |
2264 | nla_nest_end(msg, entry_list); |
2265 | return 0; |
2266 | |
2267 | error_nest: |
2268 | nla_nest_cancel(msg, entry_list); |
2269 | return -1; |
2270 | } |
2271 | |
2272 | static int taprio_dump_tc_entries(struct sk_buff *skb, |
2273 | struct taprio_sched *q, |
2274 | struct sched_gate_list *sched) |
2275 | { |
2276 | struct nlattr *n; |
2277 | int tc; |
2278 | |
2279 | for (tc = 0; tc < TC_MAX_QUEUE; tc++) { |
2280 | n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY); |
2281 | if (!n) |
2282 | return -EMSGSIZE; |
2283 | |
2284 | if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc)) |
2285 | goto nla_put_failure; |
2286 | |
2287 | if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU, |
2288 | sched->max_sdu[tc])) |
2289 | goto nla_put_failure; |
2290 | |
2291 | if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc])) |
2292 | goto nla_put_failure; |
2293 | |
2294 | nla_nest_end(skb, n); |
2295 | } |
2296 | |
2297 | return 0; |
2298 | |
2299 | nla_put_failure: |
2300 | nla_nest_cancel(skb, n); |
2301 | return -EMSGSIZE; |
2302 | } |
2303 | |
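/* Drivers only fill in the counters they support; anything left at
 * TAPRIO_STAT_NOT_SET (the 0xff pattern memset by taprio_dump_xstats())
 * is simply not dumped.
 */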
2304 | static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype) |
2305 | { |
2306 | if (val == TAPRIO_STAT_NOT_SET) |
2307 | return 0; |
2308 | if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD)) |
2309 | return -EMSGSIZE; |
2310 | return 0; |
2311 | } |
2312 | |
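/* Query the driver for offload statistics via ndo_setup_tc() and dump
 * whatever counters it filled in under TCA_STATS_APP. Lack of driver
 * support is not an error, since these statistics are optional.
 */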
2313 | static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d, |
2314 | struct tc_taprio_qopt_offload *offload, |
2315 | struct tc_taprio_qopt_stats *stats) |
2316 | { |
2317 | struct net_device *dev = qdisc_dev(sch); |
2318 | const struct net_device_ops *ops; |
2319 | struct sk_buff *skb = d->skb; |
2320 | struct nlattr *xstats; |
2321 | int err; |
2322 | |
2323 | ops = qdisc_dev(sch)->netdev_ops; |
2324 | |
2325 | /* FIXME I could use qdisc_offload_dump_helper(), but that messes |
2326 | * with sch->flags depending on whether the device reports taprio |
2327 | * stats, and I'm not sure whether that's a good idea, considering |
2328 | * that stats are optional to the offload itself |
2329 | */ |
2330 | if (!ops->ndo_setup_tc) |
2331 | return 0; |
2332 | |
2333 | memset(stats, 0xff, sizeof(*stats)); |
2334 | |
2335 | err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); |
2336 | if (err == -EOPNOTSUPP) |
2337 | return 0; |
2338 | if (err) |
2339 | return err; |
2340 | |
2341 | xstats = nla_nest_start(skb, TCA_STATS_APP); |
2342 | if (!xstats) |
2343 | goto err; |
2344 | |
2345 | if (taprio_put_stat(skb, stats->window_drops, |
2346 | TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) || |
2347 | taprio_put_stat(skb, stats->tx_overruns, |
2348 | TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS)) |
2349 | goto err_cancel; |
2350 | |
2351 | nla_nest_end(skb, xstats); |
2352 | |
2353 | return 0; |
2354 | |
2355 | err_cancel: |
2356 | nla_nest_cancel(skb, xstats); |
2357 | err: |
2358 | return -EMSGSIZE; |
2359 | } |
2360 | |
2361 | static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d) |
2362 | { |
2363 | struct tc_taprio_qopt_offload offload = { |
2364 | .cmd = TAPRIO_CMD_STATS, |
2365 | }; |
2366 | |
2367 | return taprio_dump_xstats(sch, d, &offload, &offload.stats); |
2368 | } |
2369 | |
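/* Dump the current configuration: priority map, flags, clockid (software
 * and txtime-assist modes only), per-TC entries and the operational
 * schedule, plus the administrative schedule nested under
 * TCA_TAPRIO_ATTR_ADMIN_SCHED if one is pending.
 */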
2370 | static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) |
2371 | { |
2372 | struct taprio_sched *q = qdisc_priv(sch); |
2373 | struct net_device *dev = qdisc_dev(sch); |
2374 | struct sched_gate_list *oper, *admin; |
2375 | struct tc_mqprio_qopt opt = { 0 }; |
2376 | struct nlattr *nest, *sched_nest; |
2377 | |
2378 | oper = rtnl_dereference(q->oper_sched); |
2379 | admin = rtnl_dereference(q->admin_sched); |
2380 | |
2381 | mqprio_qopt_reconstruct(dev, &opt); |
2382 | |
2383 | nest = nla_nest_start_noflag(skb, TCA_OPTIONS); |
2384 | if (!nest) |
2385 | goto start_error; |
2386 | |
2387 | if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt)) |
2388 | goto options_error; |
2389 | |
2390 | if (!FULL_OFFLOAD_IS_ENABLED(q->flags) && |
2391 | nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) |
2392 | goto options_error; |
2393 | |
2394 | if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) |
2395 | goto options_error; |
2396 | |
2397 | if (q->txtime_delay && |
2398 | nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) |
2399 | goto options_error; |
2400 | |
2401 | if (oper && taprio_dump_tc_entries(skb, q, oper)) |
2402 | goto options_error; |
2403 | |
2404 | if (oper && dump_schedule(skb, oper)) |
2405 | goto options_error; |
2406 | |
2407 | if (!admin) |
2408 | goto done; |
2409 | |
2410 | sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED); |
2411 | if (!sched_nest) |
2412 | goto options_error; |
2413 | |
2414 | if (dump_schedule(skb, admin)) |
2415 | goto admin_error; |
2416 | |
2417 | nla_nest_end(skb, sched_nest); |
2418 | |
2419 | done: |
2420 | return nla_nest_end(skb, nest); |
2421 | |
2422 | admin_error: |
2423 | nla_nest_cancel(skb, sched_nest); |
2424 | |
2425 | options_error: |
2426 | nla_nest_cancel(skb, nest); |
2427 | |
2428 | start_error: |
2429 | return -ENOSPC; |
2430 | } |
2431 | |
2432 | static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) |
2433 | { |
2434 | struct taprio_sched *q = qdisc_priv(sch); |
2435 | struct net_device *dev = qdisc_dev(sch); |
2436 | unsigned int ntx = cl - 1; |
2437 | |
2438 | if (ntx >= dev->num_tx_queues) |
2439 | return NULL; |
2440 | |
2441 | return q->qdiscs[ntx]; |
2442 | } |
2443 | |
2444 | static unsigned long taprio_find(struct Qdisc *sch, u32 classid) |
2445 | { |
2446 | unsigned int ntx = TC_H_MIN(classid); |
2447 | |
2448 | if (!taprio_queue_get(sch, ntx)) |
2449 | return 0; |
2450 | return ntx; |
2451 | } |
2452 | |
2453 | static int taprio_dump_class(struct Qdisc *sch, unsigned long cl, |
2454 | struct sk_buff *skb, struct tcmsg *tcm) |
2455 | { |
2456 | struct Qdisc *child = taprio_leaf(sch, cl); |
2457 | |
2458 | tcm->tcm_parent = TC_H_ROOT; |
2459 | tcm->tcm_handle |= TC_H_MIN(cl); |
2460 | tcm->tcm_info = child->handle; |
2461 | |
2462 | return 0; |
2463 | } |
2464 | |
2465 | static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, |
2466 | struct gnet_dump *d) |
2467 | __releases(d->lock) |
2468 | __acquires(d->lock) |
2469 | { |
2470 | struct Qdisc *child = taprio_leaf(sch, cl); |
2471 | struct tc_taprio_qopt_offload offload = { |
2472 | .cmd = TAPRIO_CMD_QUEUE_STATS, |
2473 | .queue_stats = { |
2474 | .queue = cl - 1, |
2475 | }, |
2476 | }; |
2477 | |
2478 | if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 || |
2479 | qdisc_qstats_copy(d, child) < 0) |
2480 | return -1; |
2481 | |
2482 | return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats); |
2483 | } |
2484 | |
2485 | static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
2486 | { |
2487 | struct net_device *dev = qdisc_dev(sch); |
2488 | unsigned long ntx; |
2489 | |
2490 | if (arg->stop) |
2491 | return; |
2492 | |
2493 | arg->count = arg->skip; |
2494 | for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { |
2495 | if (!tc_qdisc_stats_dump(sch, ntx + 1, arg)) |
2496 | break; |
2497 | } |
2498 | } |
2499 | |
2500 | static struct netdev_queue *taprio_select_queue(struct Qdisc *sch, |
2501 | struct tcmsg *tcm) |
2502 | { |
2503 | return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent)); |
2504 | } |
2505 | |
2506 | static const struct Qdisc_class_ops taprio_class_ops = { |
2507 | .graft = taprio_graft, |
2508 | .leaf = taprio_leaf, |
2509 | .find = taprio_find, |
2510 | .walk = taprio_walk, |
2511 | .dump = taprio_dump_class, |
2512 | .dump_stats = taprio_dump_class_stats, |
2513 | .select_queue = taprio_select_queue, |
2514 | }; |
2515 | |
2516 | static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { |
2517 | .cl_ops = &taprio_class_ops, |
2518 | .id = "taprio" , |
2519 | .priv_size = sizeof(struct taprio_sched), |
2520 | .init = taprio_init, |
2521 | .change = taprio_change, |
2522 | .destroy = taprio_destroy, |
2523 | .reset = taprio_reset, |
2524 | .attach = taprio_attach, |
2525 | .peek = taprio_peek, |
2526 | .dequeue = taprio_dequeue, |
2527 | .enqueue = taprio_enqueue, |
2528 | .dump = taprio_dump, |
2529 | .dump_stats = taprio_dump_stats, |
2530 | .owner = THIS_MODULE, |
2531 | }; |
2532 | MODULE_ALIAS_NET_SCH("taprio"); |
2533 | |
2534 | static struct notifier_block taprio_device_notifier = { |
2535 | .notifier_call = taprio_dev_notifier, |
2536 | }; |
2537 | |
2538 | static int __init taprio_module_init(void) |
2539 | { |
2540 | int err = register_netdevice_notifier(&taprio_device_notifier); |
2541 | |
2542 | if (err) |
2543 | return err; |
2544 | |
2545 | return register_qdisc(&taprio_qdisc_ops); |
2546 | } |
2547 | |
2548 | static void __exit taprio_module_exit(void) |
2549 | { |
2550 | unregister_qdisc(&taprio_qdisc_ops); |
2551 | unregister_netdevice_notifier(&taprio_device_notifier); |
2552 | } |
2553 | |
2554 | module_init(taprio_module_init); |
2555 | module_exit(taprio_module_exit); |
2556 | MODULE_LICENSE("GPL"); |
2557 | MODULE_DESCRIPTION("Time Aware Priority qdisc"); |
2558 | |