1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
2 | // Copyright (c) 2020 Mellanox Technologies |
3 | |
4 | #include "en/ptp.h" |
5 | #include "en/health.h" |
6 | #include "en/txrx.h" |
7 | #include "en/params.h" |
8 | #include "en/fs_tt_redirect.h" |
9 | #include <linux/list.h> |
10 | #include <linux/spinlock.h> |
11 | |
/* Flow-steering state that redirects PTP traffic to the PTP RQ. */
struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;     /* L2 ethertype (ETH_P_1588) redirect rule */
	struct mlx5_flow_handle *udp_v4_rule; /* UDP over IPv4 PTP event-port rule */
	struct mlx5_flow_handle *udp_v6_rule; /* UDP over IPv6 PTP event-port rule */
	bool valid;                           /* true once all rules are installed */
};
18 | |
/* Parameter bundle used while opening the PTP channel's queues. */
struct mlx5e_ptp_params {
	struct mlx5e_params params;         /* channel-wide parameters (derived from priv) */
	struct mlx5e_sq_param txq_sq_param; /* parameters for the PTP TX SQs */
	struct mlx5e_rq_param rq_param;     /* parameters for the PTP RQ */
};
24 | |
/* Tracks one in-flight port-timestamp CQE, identified by its metadata id. */
struct mlx5e_ptp_port_ts_cqe_tracker {
	u8 metadata_id;         /* index into the metadata map/freelist */
	bool inuse : 1;         /* set while a port-timestamp CQE is outstanding */
	struct list_head entry; /* linkage on the pending-CQE list (FIFO order) */
};
30 | |
/* FIFO of trackers awaiting their port-timestamp CQE. */
struct mlx5e_ptp_port_ts_cqe_list {
	struct mlx5e_ptp_port_ts_cqe_tracker *nodes; /* one tracker per metadata slot */
	struct list_head tracker_list_head;          /* pending trackers, oldest first */
	/* Sync list operations in xmit and napi_poll contexts */
	spinlock_t tracker_list_lock;
};
37 | |
38 | static inline void |
39 | mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata) |
40 | { |
41 | struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata]; |
42 | |
43 | WARN_ON_ONCE(tracker->inuse); |
44 | tracker->inuse = true; |
45 | spin_lock_bh(lock: &list->tracker_list_lock); |
46 | list_add_tail(new: &tracker->entry, head: &list->tracker_list_head); |
47 | spin_unlock_bh(lock: &list->tracker_list_lock); |
48 | } |
49 | |
50 | static void |
51 | mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata) |
52 | { |
53 | struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata]; |
54 | |
55 | WARN_ON_ONCE(!tracker->inuse); |
56 | tracker->inuse = false; |
57 | spin_lock_bh(lock: &list->tracker_list_lock); |
58 | list_del(entry: &tracker->entry); |
59 | spin_unlock_bh(lock: &list->tracker_list_lock); |
60 | } |
61 | |
62 | void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata) |
63 | { |
64 | mlx5e_ptp_port_ts_cqe_list_add(list: ptpsq->ts_cqe_pending_list, metadata); |
65 | } |
66 | |
/* Timestamp pair stored in skb->cb while waiting for both TX completions. */
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;  /* timestamp from the regular send-CQ CQE (DMA) */
	ktime_t port_hwtstamp; /* timestamp from the port-timestamp CQE */
};
71 | |
72 | void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb) |
73 | { |
74 | memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); |
75 | } |
76 | |
77 | static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb) |
78 | { |
79 | BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb)); |
80 | return (struct mlx5e_skb_cb_hwtstamp *)skb->cb; |
81 | } |
82 | |
83 | static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb, |
84 | struct mlx5e_ptp_cq_stats *cq_stats) |
85 | { |
86 | struct skb_shared_hwtstamps hwts = {}; |
87 | ktime_t diff; |
88 | |
89 | diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp - |
90 | mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp); |
91 | |
92 | /* Maximal allowed diff is 1 / 128 second */ |
93 | if (diff > (NSEC_PER_SEC >> 7)) { |
94 | cq_stats->abort++; |
95 | cq_stats->abort_abs_diff_ns += diff; |
96 | return; |
97 | } |
98 | |
99 | hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp; |
100 | skb_tstamp_tx(orig_skb: skb, hwtstamps: &hwts); |
101 | } |
102 | |
103 | void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, |
104 | ktime_t hwtstamp, |
105 | struct mlx5e_ptp_cq_stats *cq_stats) |
106 | { |
107 | switch (hwtstamp_type) { |
108 | case (MLX5E_SKB_CB_CQE_HWTSTAMP): |
109 | mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp; |
110 | break; |
111 | case (MLX5E_SKB_CB_PORT_HWTSTAMP): |
112 | mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp; |
113 | break; |
114 | } |
115 | |
116 | /* If both CQEs arrive, check and report the port tstamp, and clear skb cb as |
117 | * skb soon to be released. |
118 | */ |
119 | if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp || |
120 | !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp) |
121 | return; |
122 | |
123 | mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats); |
124 | memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); |
125 | } |
126 | |
127 | static struct sk_buff * |
128 | mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata) |
129 | { |
130 | return map->data[metadata]; |
131 | } |
132 | |
133 | static struct sk_buff * |
134 | mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata) |
135 | { |
136 | struct sk_buff *skb; |
137 | |
138 | skb = map->data[metadata]; |
139 | map->data[metadata] = NULL; |
140 | |
141 | return skb; |
142 | } |
143 | |
144 | static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map) |
145 | { |
146 | /* Considered beginning unhealthy state if size * 15 / 2^4 cannot be reclaimed. */ |
147 | return map->undelivered_counter > (map->capacity >> 4) * 15; |
148 | } |
149 | |
/* Walk the pending-CQE list in submission order and retire trackers whose
 * DMA timestamp is older than @port_tstamp by more than the undelivered
 * timeout — their port-timestamp CQEs are presumed lost.  The walk stops at
 * the first entry still inside the window, since the list is time-ordered.
 */
static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
						 ktime_t port_tstamp)
{
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
	ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;

	spin_lock_bh(lock: &cqe_list->tracker_list_lock);
	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
		struct sk_buff *skb =
			mlx5e_ptp_metadata_map_lookup(map: metadata_map, metadata: pos->metadata_id);
		ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;

		/* No DMA timestamp yet, or still within the timeout window:
		 * all following entries are newer, so stop here.
		 */
		if (!dma_tstamp ||
		    ktime_after(ktime_add(dma_tstamp, timeout), cmp2: port_tstamp))
			break;

		/* Count the lost CQE so its metadata slot can be reclaimed,
		 * and release the tracker.
		 */
		metadata_map->undelivered_counter++;
		WARN_ON_ONCE(!pos->inuse);
		pos->inuse = false;
		list_del(entry: &pos->entry);
	}
	spin_unlock_bh(lock: &cqe_list->tracker_list_lock);
}
175 | |
/* Map a CQE's WQE counter to a metadata index (mask wraps to db size). */
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)

/* Handle one port-timestamp CQE: release its tracker, detach the skb,
 * report the hardware timestamp, and record the metadata id in
 * @md_buff/@md_buff_sz so the caller can recycle it after ringing the
 * CQ doorbell.
 */
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    u8 *md_buff,
				    u8 *md_buff_sz,
				    int budget)
{
	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
	u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
		mlx5e_ptp_port_ts_cqe_list_remove(list: pending_cqe_list, metadata: metadata_id);
	} else {
		/* Reclaim space in the unlikely event CQE was delivered after
		 * marking it late.
		 */
		ptpsq->metadata_map.undelivered_counter--;
		ptpsq->cq_stats->late_cqe++;
	}

	skb = mlx5e_ptp_metadata_map_remove(map: &ptpsq->metadata_map, metadata: metadata_id);

	if (unlikely(is_err_cqe)) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	/* Convert the raw CQE timestamp to ns and report it as the port
	 * timestamp; then sweep older pending entries as undelivered.
	 */
	hwtstamp = mlx5e_cqe_ts_to_ns(func: sq->ptp_cyc2time, clock: sq->clock, cqe_ts: get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, hwtstamp_type: MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, cq_stats: ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, port_tstamp: hwtstamp);
out:
	/* napi_consume_skb() tolerates a NULL skb (late/undelivered case). */
	napi_consume_skb(skb, budget);
	md_buff[(*md_buff_sz)++] = metadata_id;
	/* Kick the recovery flow once too many timestamps go missing. */
	if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
	    !test_and_set_bit(nr: MLX5E_SQ_STATE_RECOVERING, addr: &sq->state))
		queue_work(wq: ptpsq->txqsq.priv->wq, work: &ptpsq->report_unhealthy_work);
}
221 | |
/* Poll the port-timestamp CQ of a PTP SQ up to the NAPI budget.
 * Freed metadata ids are buffered locally and pushed back to the freelist
 * only after the doorbell record is updated and the barrier, so a slot is
 * never reused before its CQ space is visibly free.
 * Returns true when the budget was exhausted (more work may remain).
 */
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
	u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
	u8 metadata_buff_sz = 0;
	struct mlx5_cqwq *cqwq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	cqwq = &cq->wq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(wq: cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(wq: cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
					md_buff: metadata_buff, md_buff_sz: &metadata_buff_sz, budget: napi_budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(wq: cqwq)));

	mlx5_cqwq_update_db_record(wq: cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	/* Now it is safe to recycle the metadata slots for new transmissions. */
	while (metadata_buff_sz > 0)
		mlx5e_ptp_metadata_fifo_push(fifo: &ptpsq->metadata_freelist,
					     metadata: metadata_buff[--metadata_buff_sz]);

	mlx5e_txqsq_wake(sq: &ptpsq->txqsq);

	return work_done == budget;
}
261 | |
/* NAPI poll handler for the PTP channel: services the per-TC send and
 * timestamp CQs (when TX is enabled) and the RX CQ (when RX is enabled),
 * then either reports the full budget (still busy) or completes NAPI and
 * re-arms the CQs.
 */
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(cq: &c->ptpsq[i].txqsq.cq, napi_budget: budget);
			busy |= mlx5e_ptp_poll_ts_cq(cq: &c->ptpsq[i].ts_cq, napi_budget: budget);
		}
	}
	/* budget == 0 means netpoll context: skip RX processing entirely. */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(cq: &rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	/* Still busy: claim the whole budget so NAPI polls us again. */
	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	/* Re-arm all CQs so the next completion raises an interrupt. */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(cq: &c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(cq: &c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(cq: &rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}
314 | |
/* Allocate and initialize the software state of one PTP TX SQ: fill in the
 * descriptor fields from the channel/params, create the cyclic WQ and the
 * per-SQ DB.  Returns 0 or a negative errno; on failure the WQ is destroyed.
 */
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev = c->pdev;
	sq->clock = &mdev->clock;
	sq->mkey_be = c->mkey_be;
	sq->netdev = c->netdev;
	sq->priv = c->priv;
	sq->mdev = mdev;
	sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix = txq_ix;
	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	/* Without HW VLAN insertion, the VLAN tag must be inlined in the WQE. */
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(nr: MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, addr: &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	/* Allocate on the NUMA node of the DMA device. */
	node = dev_to_node(dev: mlx5_core_dma_dev(dev: mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, param: &param->wq, wqc: sqc_wq, wq, wq_ctrl: &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, numa: node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(wq_ctrl: &sq->wq_ctrl);

	return err;
}
365 | |
366 | static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) |
367 | { |
368 | mlx5_core_destroy_sq(dev: mdev, sqn); |
369 | } |
370 | |
/* Allocate the three per-SQ PTP tracking databases (pending-CQE list with
 * its tracker nodes, the metadata freelist, and the metadata->skb map),
 * all sized to the smaller of the SQ size and the HW-supported metadata
 * range.  Returns 0 or -ENOMEM, unwinding partial allocations on failure.
 */
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
	int db_sz;
	int md;

	cqe_list = kvzalloc_node(size: sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, node: numa);
	if (!cqe_list)
		return -ENOMEM;
	ptpsq->ts_cqe_pending_list = cqe_list;

	/* db_sz is a power of two: either the SQ size or the HW metadata
	 * counter range, whichever is smaller; the mask below relies on it.
	 */
	db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
		      1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
					  ts_cqe_metadata_size2wqe_counter));
	ptpsq->ts_cqe_ctr_mask = db_sz - 1;

	cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
					GFP_KERNEL, node: numa);
	if (!cqe_list->nodes)
		goto free_cqe_list;
	INIT_LIST_HEAD(list: &cqe_list->tracker_list_head);
	spin_lock_init(&cqe_list->tracker_list_lock);

	metadata_freelist->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
			      GFP_KERNEL, node: numa);
	if (!metadata_freelist->data)
		goto free_cqe_list_nodes;
	metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;

	/* Seed each tracker with its id and make every id initially free. */
	for (md = 0; md < db_sz; ++md) {
		cqe_list->nodes[md].metadata_id = md;
		metadata_freelist->data[md] = md;
	}
	metadata_freelist->pc = db_sz;

	metadata_map->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
			      GFP_KERNEL, node: numa);
	if (!metadata_map->data)
		goto free_metadata_freelist;
	metadata_map->capacity = db_sz;

	return 0;

free_metadata_freelist:
	kvfree(addr: metadata_freelist->data);
free_cqe_list_nodes:
	kvfree(addr: cqe_list->nodes);
free_cqe_list:
	kvfree(addr: cqe_list);
	return -ENOMEM;
}
426 | |
427 | static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map) |
428 | { |
429 | int idx; |
430 | |
431 | for (idx = 0; idx < map->capacity; ++idx) { |
432 | struct sk_buff *skb = map->data[idx]; |
433 | |
434 | dev_kfree_skb_any(skb); |
435 | } |
436 | } |
437 | |
438 | static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq) |
439 | { |
440 | mlx5e_ptp_drain_metadata_map(map: &ptpsq->metadata_map); |
441 | kvfree(addr: ptpsq->metadata_map.data); |
442 | kvfree(addr: ptpsq->metadata_freelist.data); |
443 | kvfree(addr: ptpsq->ts_cqe_pending_list->nodes); |
444 | kvfree(addr: ptpsq->ts_cqe_pending_list); |
445 | } |
446 | |
447 | static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work) |
448 | { |
449 | struct mlx5e_ptpsq *ptpsq = |
450 | container_of(work, struct mlx5e_ptpsq, report_unhealthy_work); |
451 | |
452 | mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq); |
453 | } |
454 | |
/* Open one PTP TX SQ: allocate its software state, create the HW SQ with
 * timestamp CQEs steered to the dedicated ts CQ, allocate the tracking
 * databases, and arm the unhealthy-report work.  Returns 0 or a negative
 * errno; on failure the SQ software state is freed.
 */
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, params: &cparams->params, param: sqp,
				    sq: txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = txqsq->cq.mcq.cqn;
	csp.wq_ctrl = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	/* Route the port-timestamp CQEs to the dedicated ts CQ. */
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(mdev: c->mdev, param: sqp, csp: &csp, qos_queue_group_id: 0, sqn: &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq, numa: dev_to_node(dev: mlx5_core_dma_dev(dev: c->mdev)));
	if (err)
		goto err_free_txqsq;

	INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(sq: txqsq);

	return err;
}
493 | |
/* Tear down one PTP TX SQ in strict reverse order of open: cancel pending
 * work, free the tracking databases, destroy the HW SQ, then reclaim any
 * outstanding descriptors and the software state.
 */
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	/* Avoid self-deadlock: the unhealthy work itself may trigger this
	 * close path via recovery, so don't cancel_sync from within it.
	 */
	if (current_work() != &ptpsq->report_unhealthy_work)
		cancel_work_sync(work: &ptpsq->report_unhealthy_work);
	mlx5e_ptp_free_traffic_db(ptpsq);
	cancel_work_sync(work: &sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sqn: sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
507 | |
508 | static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c, |
509 | struct mlx5e_ptp_params *cparams) |
510 | { |
511 | struct mlx5e_params *params = &cparams->params; |
512 | u8 num_tc = mlx5e_get_dcb_num_tc(params); |
513 | int ix_base; |
514 | int err; |
515 | int tc; |
516 | |
517 | ix_base = num_tc * params->num_channels; |
518 | |
519 | for (tc = 0; tc < num_tc; tc++) { |
520 | int txq_ix = ix_base + tc; |
521 | u32 tisn; |
522 | |
523 | tisn = mlx5e_profile_get_tisn(mdev: c->mdev, priv: c->priv, profile: c->priv->profile, |
524 | lag_port: c->lag_port, tc); |
525 | err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, ptpsq: &c->ptpsq[tc]); |
526 | if (err) |
527 | goto close_txqsq; |
528 | } |
529 | |
530 | return 0; |
531 | |
532 | close_txqsq: |
533 | for (--tc; tc >= 0; tc--) |
534 | mlx5e_ptp_close_txqsq(ptpsq: &c->ptpsq[tc]); |
535 | |
536 | return err; |
537 | } |
538 | |
539 | static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c) |
540 | { |
541 | int tc; |
542 | |
543 | for (tc = 0; tc < c->num_tc; tc++) |
544 | mlx5e_ptp_close_txqsq(ptpsq: &c->ptpsq[tc]); |
545 | } |
546 | |
/* Open the two CQs of each per-TC PTP SQ: first all send CQs, then all
 * port-timestamp CQs.  On failure, the partially opened group is unwound;
 * note the `tc = num_tc` reset before the first-phase unwind label, which
 * rolls back ALL send CQs when a ts CQ fails.
 */
static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.netdev = c->netdev;
	ccp.wq = c->priv->wq;
	ccp.node = dev_to_node(dev: mlx5_core_dma_dev(dev: c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	/* Both CQ types share the send-queue CQ parameters. */
	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(mdev: c->mdev, moder: ptp_moder, param: cq_param, ccp: &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(mdev: c->mdev, moder: ptp_moder, param: cq_param, ccp: &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(cq: &c->ptpsq[tc].ts_cq);
	/* Fall through to unwind all send CQs opened in the first loop. */
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(cq: &c->ptpsq[tc].txqsq.cq);

	return err;
}
600 | |
601 | static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c, |
602 | struct mlx5e_ptp_params *cparams) |
603 | { |
604 | struct mlx5e_create_cq_param ccp = {}; |
605 | struct dim_cq_moder ptp_moder = {}; |
606 | struct mlx5e_cq_param *cq_param; |
607 | struct mlx5e_cq *cq = &c->rq.cq; |
608 | |
609 | ccp.netdev = c->netdev; |
610 | ccp.wq = c->priv->wq; |
611 | ccp.node = dev_to_node(dev: mlx5_core_dma_dev(dev: c->mdev)); |
612 | ccp.ch_stats = c->stats; |
613 | ccp.napi = &c->napi; |
614 | ccp.ix = MLX5E_PTP_CHANNEL_IX; |
615 | |
616 | cq_param = &cparams->rq_param.cqp; |
617 | |
618 | return mlx5e_open_cq(mdev: c->mdev, moder: ptp_moder, param: cq_param, ccp: &ccp, cq); |
619 | } |
620 | |
621 | static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c) |
622 | { |
623 | int tc; |
624 | |
625 | for (tc = 0; tc < c->num_tc; tc++) |
626 | mlx5e_close_cq(cq: &c->ptpsq[tc].ts_cq); |
627 | |
628 | for (tc = 0; tc < c->num_tc; tc++) |
629 | mlx5e_close_cq(cq: &c->ptpsq[tc].txqsq.cq); |
630 | } |
631 | |
632 | static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev, |
633 | struct mlx5e_params *params, |
634 | struct mlx5e_sq_param *param) |
635 | { |
636 | void *sqc = param->sqc; |
637 | void *wq; |
638 | |
639 | mlx5e_build_sq_param_common(mdev, param); |
640 | |
641 | wq = MLX5_ADDR_OF(sqc, sqc, wq); |
642 | MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); |
643 | param->stop_room = mlx5e_stop_room_for_max_wqe(mdev); |
644 | mlx5e_build_tx_cq_param(mdev, params, param: ¶m->cqp); |
645 | } |
646 | |
647 | static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev, |
648 | struct net_device *netdev, |
649 | struct mlx5e_ptp_params *ptp_params) |
650 | { |
651 | struct mlx5e_rq_param *rq_params = &ptp_params->rq_param; |
652 | struct mlx5e_params *params = &ptp_params->params; |
653 | |
654 | params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; |
655 | mlx5e_init_rq_type_params(mdev, params); |
656 | params->sw_mtu = netdev->max_mtu; |
657 | mlx5e_build_rq_param(mdev, params, NULL, param: rq_params); |
658 | } |
659 | |
/* Derive the PTP channel's parameter set from the netdev's current
 * parameters (@orig), then build the SQ/RQ creation parameters for
 * whichever directions (TX/RX) are enabled in the channel state.
 */
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		/* The SQ size is capped by the HW metadata counter range so
		 * that WQE counters map 1:1 onto metadata ids.
		 */
		params->log_sq_size =
			min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
			    MLX5E_PTP_MAX_LOG_SQ_SIZE);
		params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
		mlx5e_ptp_build_sq_param(mdev: c->mdev, params, param: &cparams->txq_sq_param);
	}
	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(mdev: c->mdev, netdev: c->netdev, ptp_params: cparams);
	}
}
686 | |
/* Initialize the software state of the PTP RQ (descriptor fields, RX
 * handlers, XDP RXQ info registration).  Returns 0 or a negative errno.
 */
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = priv->netdev;
	rq->priv = priv;
	rq->clock = &mdev->clock;
	rq->tstamp = &priv->tstamp;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats = &c->priv->ptp_stats.rq;
	rq->ix = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	/* PTP RQ never serves XSK (xsk = false). */
	err = mlx5e_rq_set_handlers(rq, params, xsk: false);
	if (err)
		return err;

	return xdp_rxq_info_reg(xdp_rxq: &rq->xdp_rxq, dev: rq->netdev, queue_index: rq->ix, napi_id: 0);
}
711 | |
712 | static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, |
713 | struct mlx5e_rq_param *rq_param) |
714 | { |
715 | int node = dev_to_node(dev: c->mdev->device); |
716 | int err, sd_ix; |
717 | u16 q_counter; |
718 | |
719 | err = mlx5e_init_ptp_rq(c, params, rq: &c->rq); |
720 | if (err) |
721 | return err; |
722 | |
723 | sd_ix = mlx5_sd_ch_ix_get_dev_ix(dev: c->mdev, MLX5E_PTP_CHANNEL_IX); |
724 | q_counter = c->priv->q_counter[sd_ix]; |
725 | return mlx5e_open_rq(params, param: rq_param, NULL, node, q_counter, rq: &c->rq); |
726 | } |
727 | |
/* Open all queues of the PTP channel: TX CQs then TX SQs (when TX is
 * enabled), RX CQ then RQ (when RX is enabled).  On failure, everything
 * already opened is torn down; the labels are guarded by the same state
 * bits so only the opened direction is unwound.
 */
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, params: &cparams->params, rq_param: &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(cq: &c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}
765 | |
766 | static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c) |
767 | { |
768 | if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { |
769 | mlx5e_close_rq(rq: &c->rq); |
770 | mlx5e_close_cq(cq: &c->rq.cq); |
771 | } |
772 | if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { |
773 | mlx5e_ptp_close_txqsqs(c); |
774 | mlx5e_ptp_close_tx_cqs(c); |
775 | } |
776 | } |
777 | |
778 | static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params) |
779 | { |
780 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS)) |
781 | __set_bit(MLX5E_PTP_STATE_TX, c->state); |
782 | |
783 | if (params->ptp_rx) |
784 | __set_bit(MLX5E_PTP_STATE_RX, c->state); |
785 | |
786 | return bitmap_empty(src: c->state, nbits: MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0; |
787 | } |
788 | |
789 | static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs) |
790 | { |
791 | struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs); |
792 | |
793 | if (!ptp_fs->valid) |
794 | return; |
795 | |
796 | mlx5e_fs_tt_redirect_del_rule(rule: ptp_fs->l2_rule); |
797 | mlx5e_fs_tt_redirect_any_destroy(fs); |
798 | |
799 | mlx5e_fs_tt_redirect_del_rule(rule: ptp_fs->udp_v6_rule); |
800 | mlx5e_fs_tt_redirect_del_rule(rule: ptp_fs->udp_v4_rule); |
801 | mlx5e_fs_tt_redirect_udp_destroy(fs); |
802 | ptp_fs->valid = false; |
803 | } |
804 | |
/* Install the PTP RX steering rules that redirect PTP traffic (UDP event
 * port over IPv4/IPv6, and raw L2 ETH_P_1588) to the PTP TIR.  Idempotent:
 * returns 0 immediately if the rules are already valid.  On failure, all
 * partially installed state is unwound in reverse order.
 */
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(res: priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, ttc_type: MLX5_TT_IPV4_UDP,
						 tir_num: tirn, PTP_EV_PORT);
	if (IS_ERR(ptr: rule)) {
		err = PTR_ERR(ptr: rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, ttc_type: MLX5_TT_IPV6_UDP,
						 tir_num: tirn, PTP_EV_PORT);
	if (IS_ERR(ptr: rule)) {
		err = PTR_ERR(ptr: rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	/* L2 PTP frames (ethertype 0x88F7) bypass the UDP stack entirely. */
	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tir_num: tirn, ETH_P_1588);
	if (IS_ERR(ptr: rule)) {
		err = PTR_ERR(ptr: rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(rule: ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(rule: ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
	return err;
}
862 | |
/* Create the PTP channel: allocate channel and parameter scratch space,
 * latch the TX/RX state, register NAPI, build parameters and open the
 * queues.  On success *cp holds the new channel.  Returns 0 or a negative
 * errno; all intermediate state is released on failure.
 */
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;


	c = kvzalloc_node(size: sizeof(*c), GFP_KERNEL, node: dev_to_node(dev: mlx5_core_dma_dev(dev: mdev)));
	cparams = kvzalloc(size: sizeof(*cparams), GFP_KERNEL);
	/* kvfree(NULL) is safe, so a single error path covers both allocs. */
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv = priv;
	c->mdev = priv->mdev;
	c->tstamp = &priv->tstamp;
	c->pdev = mlx5_core_dma_dev(dev: priv->mdev);
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc = mlx5e_get_dcb_num_tc(params);
	c->stats = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(dev: netdev, napi: &c->napi, poll: mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, orig: params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	/* The parameter scratch space is only needed during open. */
	kvfree(addr: cparams);

	return 0;

err_napi_del:
	netif_napi_del(napi: &c->napi);
err_free:
	kvfree(addr: cparams);
	kvfree(addr: c);
	return err;
}
918 | |
919 | void mlx5e_ptp_close(struct mlx5e_ptp *c) |
920 | { |
921 | mlx5e_ptp_close_queues(c); |
922 | netif_napi_del(napi: &c->napi); |
923 | |
924 | kvfree(addr: c); |
925 | } |
926 | |
927 | void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c) |
928 | { |
929 | int tc; |
930 | |
931 | napi_enable(n: &c->napi); |
932 | |
933 | if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { |
934 | for (tc = 0; tc < c->num_tc; tc++) |
935 | mlx5e_activate_txqsq(sq: &c->ptpsq[tc].txqsq); |
936 | } |
937 | if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { |
938 | mlx5e_ptp_rx_set_fs(priv: c->priv); |
939 | mlx5e_activate_rq(rq: &c->rq); |
940 | netif_queue_set_napi(dev: c->netdev, queue_index: c->rq.ix, type: NETDEV_QUEUE_TYPE_RX, napi: &c->napi); |
941 | } |
942 | mlx5e_trigger_napi_sched(napi: &c->napi); |
943 | } |
944 | |
945 | void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c) |
946 | { |
947 | int tc; |
948 | |
949 | if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { |
950 | netif_queue_set_napi(dev: c->netdev, queue_index: c->rq.ix, type: NETDEV_QUEUE_TYPE_RX, NULL); |
951 | mlx5e_deactivate_rq(rq: &c->rq); |
952 | } |
953 | |
954 | if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { |
955 | for (tc = 0; tc < c->num_tc; tc++) |
956 | mlx5e_deactivate_txqsq(sq: &c->ptpsq[tc].txqsq); |
957 | } |
958 | |
959 | napi_disable(n: &c->napi); |
960 | } |
961 | |
962 | int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn) |
963 | { |
964 | if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) |
965 | return -EINVAL; |
966 | |
967 | *rqn = c->rq.rqn; |
968 | return 0; |
969 | } |
970 | |
971 | int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs, |
972 | const struct mlx5e_profile *profile) |
973 | { |
974 | struct mlx5e_ptp_fs *ptp_fs; |
975 | |
976 | if (!mlx5e_profile_feature_cap(profile, PTP_RX)) |
977 | return 0; |
978 | |
979 | ptp_fs = kzalloc(size: sizeof(*ptp_fs), GFP_KERNEL); |
980 | if (!ptp_fs) |
981 | return -ENOMEM; |
982 | mlx5e_fs_set_ptp(fs, ptp_fs); |
983 | |
984 | return 0; |
985 | } |
986 | |
987 | void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs, |
988 | const struct mlx5e_profile *profile) |
989 | { |
990 | struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs); |
991 | |
992 | if (!mlx5e_profile_feature_cap(profile, PTP_RX)) |
993 | return; |
994 | |
995 | mlx5e_ptp_rx_unset_fs(fs); |
996 | kfree(objp: ptp_fs); |
997 | } |
998 | |
999 | int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set) |
1000 | { |
1001 | struct mlx5e_ptp *c = priv->channels.ptp; |
1002 | |
1003 | if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) |
1004 | return 0; |
1005 | |
1006 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) |
1007 | return 0; |
1008 | |
1009 | if (set) { |
1010 | if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) { |
1011 | netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules" ); |
1012 | return -EINVAL; |
1013 | } |
1014 | return mlx5e_ptp_rx_set_fs(priv); |
1015 | } |
1016 | /* set == false */ |
1017 | if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) { |
1018 | netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules" ); |
1019 | return -EINVAL; |
1020 | } |
1021 | mlx5e_ptp_rx_unset_fs(fs: priv->fs); |
1022 | return 0; |
1023 | } |
1024 | |