1 | /* |
2 | * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | #ifndef __MLX5_EN_H__ |
33 | #define __MLX5_EN_H__ |
34 | |
35 | #include <linux/if_vlan.h> |
36 | #include <linux/etherdevice.h> |
37 | #include <linux/timecounter.h> |
38 | #include <linux/net_tstamp.h> |
39 | #include <linux/crash_dump.h> |
40 | #include <linux/mlx5/driver.h> |
41 | #include <linux/mlx5/qp.h> |
42 | #include <linux/mlx5/cq.h> |
43 | #include <linux/mlx5/port.h> |
44 | #include <linux/mlx5/vport.h> |
45 | #include <linux/mlx5/transobj.h> |
46 | #include <linux/mlx5/fs.h> |
47 | #include <linux/rhashtable.h> |
48 | #include <net/udp_tunnel.h> |
49 | #include <net/switchdev.h> |
50 | #include <net/xdp.h> |
51 | #include <linux/dim.h> |
52 | #include <linux/bits.h> |
53 | #include "wq.h" |
54 | #include "mlx5_core.h" |
55 | #include "en_stats.h" |
56 | #include "en/dcbnl.h" |
57 | #include "en/fs.h" |
58 | #include "en/qos.h" |
59 | #include "lib/hv_vhca.h" |
60 | #include "lib/clock.h" |
61 | #include "en/rx_res.h" |
62 | #include "en/selq.h" |
63 | #include "lib/sd.h" |
64 | |
65 | extern const struct net_device_ops mlx5e_netdev_ops; |
66 | struct page_pool; |
67 | |
68 | #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) |
69 | #define MLX5E_METADATA_ETHER_LEN 8 |
70 | |
71 | #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) |
72 | |
73 | #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) |
74 | #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) |
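
/* Example (illustrative): with params->hard_mtu == MLX5E_ETH_HARD_MTU
 * (14 + 4 + 4 = 22 bytes of Ethernet, VLAN and FCS overhead), a software
 * MTU of 1500 corresponds to a hardware MTU of 1522:
 *
 *	u32 hw_mtu = MLX5E_SW2HW_MTU(params, 1500);	// 1522
 *	u32 sw_mtu = MLX5E_HW2SW_MTU(params, hw_mtu);	// back to 1500
 */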
75 | |
76 | #define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE |
77 | |
78 | #define MLX5_RX_HEADROOM NET_SKB_PAD |
79 | #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ |
80 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
81 | |
82 | #define MLX5E_RX_MAX_HEAD (256) |
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
87 | #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) |
88 | #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) |
89 | #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096) |
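
/* Example (illustrative, assuming 4K pages): with a maximum header entry size
 * of 2^MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE = 512 bytes, a page holds
 * 4096 >> 9 = 8 header entries.
 */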
90 | |
91 | #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ |
92 | (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ |
93 | #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \ |
94 | max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) |
95 | #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ |
96 | MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD)) |
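
/* Example (illustrative): on a device without the cache_line_128byte
 * capability, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ() is 6 (64-byte strides), so
 * MLX5_MPWRQ_DEF_LOG_STRIDE_SZ() is max(6, order_base_2(256)) = 8, i.e.
 * 256-byte strides matching MLX5E_RX_MAX_HEAD.
 */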
97 | |
98 | /* Keep in sync with mlx5e_mpwrq_log_wqe_sz. |
99 | * These are theoretical maximums, which can be further restricted by |
100 | * capabilities. These values are used for static resource allocations and |
101 | * sanity checks. |
102 | * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE |
103 | * size actually used at runtime, but it's not a problem when calculating static |
104 | * array sizes. |
105 | */ |
106 | #define MLX5_UMR_MAX_FLEX_SPACE \ |
107 | (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \ |
108 | MLX5_UMR_FLEX_ALIGNMENT)) |
109 | #define MLX5_MPWRQ_MAX_PAGES_PER_WQE \ |
110 | rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt)) |
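
/* Worked example (illustrative, assuming a 1KB max send WQE, a 128-byte
 * struct mlx5e_umr_wqe header, 64-byte MLX5_UMR_FLEX_ALIGNMENT and 8-byte
 * MTTs): MLX5_UMR_MAX_FLEX_SPACE = ALIGN_DOWN(1024 - 128, 64) = 896 bytes,
 * so MLX5_MPWRQ_MAX_PAGES_PER_WQE = rounddown_pow_of_two(896 / 8) = 64.
 */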
111 | |
112 | #define MLX5E_MAX_RQ_NUM_MTTS \ |
113 | (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */ |
114 | #define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */ |
115 | #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) |
116 | |
117 | #define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM)) |
118 | #define MLX5E_LOG_MAX_RX_WQE_BULK \ |
119 | (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ))) |
120 | |
121 | #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 |
122 | #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa |
123 | #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd |
124 | |
125 | #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK) |
126 | #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa |
127 | #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd |
128 | |
129 | #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 |
130 | |
131 | #define MLX5E_DEFAULT_LRO_TIMEOUT 32 |
132 | #define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024 |
133 | |
134 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 |
135 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 |
136 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 |
137 | #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 |
138 | #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10 |
139 | #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 |
140 | #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 |
141 | #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 |
142 | |
143 | #define MLX5E_MIN_NUM_CHANNELS 0x1 |
144 | #define MLX5E_MAX_NUM_CHANNELS 256 |
145 | #define MLX5E_TX_CQ_POLL_BUDGET 128 |
146 | #define MLX5E_TX_XSK_POLL_BUDGET 64 |
147 | #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ |
148 | |
149 | #define mlx5e_state_dereference(priv, p) \ |
150 | rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) |
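
/* Example (illustrative): dereferencing an RCU pointer that is only updated
 * under priv->state_lock, such as a QoS SQ slot:
 *
 *	struct mlx5e_txqsq *sq = mlx5e_state_dereference(priv, qos_sqs[qid]);
 */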
151 | |
152 | enum mlx5e_devcom_events { |
153 | MPV_DEVCOM_MASTER_UP, |
154 | MPV_DEVCOM_MASTER_DOWN, |
155 | MPV_DEVCOM_IPSEC_MASTER_UP, |
156 | MPV_DEVCOM_IPSEC_MASTER_DOWN, |
157 | }; |
158 | |
159 | static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) |
160 | { |
	if (mlx5_lag_is_lacp_owner(mdev))
162 | return 1; |
163 | |
164 | return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS); |
165 | } |
166 | |
167 | static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) |
168 | { |
169 | switch (wq_type) { |
170 | case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
171 | return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW, |
172 | wq_size / 2); |
173 | default: |
174 | return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES, |
175 | wq_size / 2); |
176 | } |
177 | } |
178 | |
/* Use this function to get the max number of channels (rxqs/txqs), only for netdev creation */
180 | static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) |
181 | { |
182 | return is_kdump_kernel() ? |
183 | MLX5E_MIN_NUM_CHANNELS : |
184 | min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS, |
185 | (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size))); |
186 | } |
187 | |
/* The maximum WQE size is reported in bytes by the max_wqe_sz_sq
 * capability. The driver hardens the limit to 1KB (16 WQEBBs),
 * unless the firmware capability is stricter.
 */
192 | static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev) |
193 | { |
194 | BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX); |
195 | |
196 | return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS, |
197 | MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB); |
198 | } |
199 | |
200 | static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev) |
201 | { |
202 | /* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS. |
203 | * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16, |
204 | * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64) |
205 | * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower |
206 | * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be |
207 | * cache-aligned. |
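	 * For example (illustrative): starting from the 16-WQEBB maximum, the
	 * bound below yields 15, which ALIGN_DOWN() reduces to 14 when
	 * L1_CACHE_BYTES >= 128.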
208 | */ |
209 | u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev); |
210 | |
211 | wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1); |
212 | #if L1_CACHE_BYTES >= 128 |
213 | wqebbs = ALIGN_DOWN(wqebbs, 2); |
214 | #endif |
215 | return wqebbs; |
216 | } |
217 | |
218 | struct mlx5e_tx_wqe { |
219 | struct mlx5_wqe_ctrl_seg ctrl; |
220 | struct mlx5_wqe_eth_seg eth; |
221 | struct mlx5_wqe_data_seg data[]; |
222 | }; |
223 | |
224 | struct mlx5e_rx_wqe_ll { |
225 | struct mlx5_wqe_srq_next_seg next; |
226 | struct mlx5_wqe_data_seg data[]; |
227 | }; |
228 | |
229 | struct mlx5e_rx_wqe_cyc { |
230 | DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data); |
231 | }; |
232 | |
233 | struct mlx5e_umr_wqe_hdr { |
234 | struct mlx5_wqe_ctrl_seg ctrl; |
235 | struct mlx5_wqe_umr_ctrl_seg uctrl; |
236 | struct mlx5_mkey_seg mkc; |
237 | }; |
238 | |
239 | struct mlx5e_umr_wqe { |
240 | struct mlx5e_umr_wqe_hdr hdr; |
241 | union { |
242 | DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts); |
243 | DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms); |
244 | DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms); |
245 | }; |
246 | }; |
247 | static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr), |
248 | "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe" ); |
249 | |
250 | enum mlx5e_priv_flag { |
251 | MLX5E_PFLAG_RX_CQE_BASED_MODER, |
252 | MLX5E_PFLAG_TX_CQE_BASED_MODER, |
253 | MLX5E_PFLAG_RX_CQE_COMPRESS, |
254 | MLX5E_PFLAG_RX_STRIDING_RQ, |
255 | MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, |
256 | MLX5E_PFLAG_XDP_TX_MPWQE, |
257 | MLX5E_PFLAG_SKB_TX_MPWQE, |
258 | MLX5E_PFLAG_TX_PORT_TS, |
259 | MLX5E_NUM_PFLAGS, /* Keep last */ |
260 | }; |
261 | |
262 | #define MLX5E_SET_PFLAG(params, pflag, enable) \ |
263 | do { \ |
264 | if (enable) \ |
265 | (params)->pflags |= BIT(pflag); \ |
266 | else \ |
267 | (params)->pflags &= ~(BIT(pflag)); \ |
268 | } while (0) |
269 | |
270 | #define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) |
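
/* Example (illustrative): toggling a private flag and testing it:
 *
 *	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		...	// CQE compression enabled in these params
 */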
271 | |
272 | enum packet_merge { |
273 | MLX5E_PACKET_MERGE_NONE, |
274 | MLX5E_PACKET_MERGE_LRO, |
275 | MLX5E_PACKET_MERGE_SHAMPO, |
276 | }; |
277 | |
278 | struct mlx5e_packet_merge_param { |
279 | enum packet_merge type; |
280 | u32 timeout; |
281 | struct { |
282 | u8 match_criteria_type; |
283 | u8 alignment_granularity; |
284 | } shampo; |
285 | }; |
286 | |
287 | struct mlx5e_params { |
288 | u8 log_sq_size; |
289 | u8 rq_wq_type; |
290 | u8 log_rq_mtu_frames; |
291 | u16 num_channels; |
292 | struct { |
293 | u16 mode; |
294 | u8 num_tc; |
295 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; |
296 | struct { |
297 | u64 max_rate[TC_MAX_QUEUE]; |
298 | u32 hw_id[TC_MAX_QUEUE]; |
299 | } channel; |
300 | } mqprio; |
301 | bool rx_cqe_compress_def; |
302 | struct dim_cq_moder rx_cq_moderation; |
303 | struct dim_cq_moder tx_cq_moderation; |
304 | struct mlx5e_packet_merge_param packet_merge; |
305 | u8 tx_min_inline_mode; |
306 | bool vlan_strip_disable; |
307 | bool scatter_fcs_en; |
308 | bool rx_dim_enabled; |
309 | bool tx_dim_enabled; |
310 | bool rx_moder_use_cqe_mode; |
311 | bool tx_moder_use_cqe_mode; |
312 | u32 pflags; |
313 | struct bpf_prog *xdp_prog; |
314 | struct mlx5e_xsk *xsk; |
315 | unsigned int sw_mtu; |
316 | int hard_mtu; |
317 | bool ptp_rx; |
318 | __be32 terminate_lkey_be; |
319 | }; |
320 | |
321 | static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params) |
322 | { |
323 | return params->mqprio.mode == TC_MQPRIO_MODE_DCB ? |
324 | params->mqprio.num_tc : 1; |
325 | } |
326 | |
327 | /* Keep this enum consistent with the corresponding strings array |
328 | * declared in en/reporter_rx.c |
329 | */ |
330 | enum { |
331 | MLX5E_RQ_STATE_ENABLED = 0, |
332 | MLX5E_RQ_STATE_RECOVERING, |
333 | MLX5E_RQ_STATE_DIM, |
334 | MLX5E_RQ_STATE_NO_CSUM_COMPLETE, |
335 | MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ |
336 | MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ |
337 | MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ |
338 | MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */ |
339 | MLX5E_RQ_STATE_XSK, /* set to indicate an xsk rq */ |
340 | MLX5E_NUM_RQ_STATES, /* Must be kept last */ |
341 | }; |
342 | |
343 | struct mlx5e_cq { |
344 | /* data path - accessed per cqe */ |
345 | struct mlx5_cqwq wq; |
346 | |
347 | /* data path - accessed per napi poll */ |
348 | u16 event_ctr; |
349 | struct napi_struct *napi; |
350 | struct mlx5_core_cq mcq; |
351 | struct mlx5e_ch_stats *ch_stats; |
352 | |
353 | /* control */ |
354 | struct net_device *netdev; |
355 | struct mlx5_core_dev *mdev; |
356 | struct workqueue_struct *workqueue; |
357 | struct mlx5_wq_ctrl wq_ctrl; |
358 | } ____cacheline_aligned_in_smp; |
359 | |
360 | struct mlx5e_cq_decomp { |
361 | /* cqe decompression */ |
362 | struct mlx5_cqe64 title; |
363 | struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE]; |
364 | u8 mini_arr_idx; |
365 | u16 left; |
366 | u16 wqe_counter; |
367 | bool last_cqe_title; |
368 | } ____cacheline_aligned_in_smp; |
369 | |
370 | enum mlx5e_dma_map_type { |
371 | MLX5E_DMA_MAP_SINGLE, |
372 | MLX5E_DMA_MAP_PAGE |
373 | }; |
374 | |
375 | struct mlx5e_sq_dma { |
376 | dma_addr_t addr; |
377 | u32 size; |
378 | enum mlx5e_dma_map_type type; |
379 | }; |
380 | |
/* Keep this enum consistent with the corresponding strings array
382 | * declared in en/reporter_tx.c |
383 | */ |
384 | enum { |
385 | MLX5E_SQ_STATE_ENABLED = 0, |
386 | MLX5E_SQ_STATE_MPWQE, |
387 | MLX5E_SQ_STATE_RECOVERING, |
388 | MLX5E_SQ_STATE_IPSEC, |
389 | MLX5E_SQ_STATE_DIM, |
390 | MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, |
391 | MLX5E_SQ_STATE_PENDING_XSK_TX, |
392 | MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, |
393 | MLX5E_NUM_SQ_STATES, /* Must be kept last */ |
394 | }; |
395 | |
396 | struct mlx5e_tx_mpwqe { |
397 | /* Current MPWQE session */ |
398 | struct mlx5e_tx_wqe *wqe; |
399 | u32 bytes_count; |
400 | u8 ds_count; |
401 | u8 ds_count_max; |
402 | u8 pkt_count; |
403 | u8 inline_on; |
404 | }; |
405 | |
406 | struct mlx5e_skb_fifo { |
407 | struct sk_buff **fifo; |
408 | u16 *pc; |
409 | u16 *cc; |
410 | u16 mask; |
411 | }; |
412 | |
413 | struct mlx5e_ptpsq; |
414 | |
415 | struct mlx5e_txqsq { |
416 | /* data path */ |
417 | |
418 | /* dirtied @completion */ |
419 | u16 cc; |
420 | u16 skb_fifo_cc; |
421 | u32 dma_fifo_cc; |
422 | struct dim *dim; /* Adaptive Moderation */ |
423 | |
424 | /* dirtied @xmit */ |
425 | u16 pc ____cacheline_aligned_in_smp; |
426 | u16 skb_fifo_pc; |
427 | u32 dma_fifo_pc; |
428 | struct mlx5e_tx_mpwqe mpwqe; |
429 | |
430 | struct mlx5e_cq cq; |
431 | |
432 | /* read only */ |
433 | struct mlx5_wq_cyc wq; |
434 | u32 dma_fifo_mask; |
435 | struct mlx5e_sq_stats *stats; |
436 | struct { |
437 | struct mlx5e_sq_dma *dma_fifo; |
438 | struct mlx5e_skb_fifo skb_fifo; |
439 | struct mlx5e_tx_wqe_info *wqe_info; |
440 | } db; |
441 | void __iomem *uar_map; |
442 | struct netdev_queue *txq; |
443 | u32 sqn; |
444 | u16 stop_room; |
445 | u8 max_sq_mpw_wqebbs; |
446 | u8 min_inline_mode; |
447 | struct device *pdev; |
448 | __be32 mkey_be; |
449 | unsigned long state; |
450 | unsigned int hw_mtu; |
451 | struct mlx5_clock *clock; |
452 | struct net_device *netdev; |
453 | struct mlx5_core_dev *mdev; |
454 | struct mlx5e_channel *channel; |
455 | struct mlx5e_priv *priv; |
456 | |
457 | /* control path */ |
458 | struct mlx5_wq_ctrl wq_ctrl; |
459 | int ch_ix; |
460 | int txq_ix; |
461 | u32 rate_limit; |
462 | struct work_struct recover_work; |
463 | struct mlx5e_ptpsq *ptpsq; |
464 | cqe_ts_to_ns ptp_cyc2time; |
465 | } ____cacheline_aligned_in_smp; |
466 | |
467 | struct mlx5e_xdp_info_fifo { |
468 | union mlx5e_xdp_info *xi; |
469 | u32 *cc; |
470 | u32 *pc; |
471 | u32 mask; |
472 | }; |
473 | |
474 | struct mlx5e_xdpsq; |
475 | struct mlx5e_xmit_data; |
476 | struct xsk_tx_metadata; |
477 | typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *); |
478 | typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *, |
479 | struct mlx5e_xmit_data *, |
480 | int, |
481 | struct xsk_tx_metadata *); |
482 | |
483 | struct mlx5e_xdpsq { |
484 | /* data path */ |
485 | |
486 | /* dirtied @completion */ |
487 | u32 xdpi_fifo_cc; |
488 | u16 cc; |
489 | |
490 | /* dirtied @xmit */ |
491 | u32 xdpi_fifo_pc ____cacheline_aligned_in_smp; |
492 | u16 pc; |
493 | struct mlx5_wqe_ctrl_seg *doorbell_cseg; |
494 | struct mlx5e_tx_mpwqe mpwqe; |
495 | |
496 | struct mlx5e_cq cq; |
497 | |
498 | /* read only */ |
499 | struct xsk_buff_pool *xsk_pool; |
500 | struct mlx5_wq_cyc wq; |
501 | struct mlx5e_xdpsq_stats *stats; |
502 | mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check; |
503 | mlx5e_fp_xmit_xdp_frame xmit_xdp_frame; |
504 | struct { |
505 | struct mlx5e_xdp_wqe_info *wqe_info; |
506 | struct mlx5e_xdp_info_fifo xdpi_fifo; |
507 | } db; |
508 | void __iomem *uar_map; |
509 | u32 sqn; |
510 | struct device *pdev; |
511 | __be32 mkey_be; |
512 | u16 stop_room; |
513 | u8 max_sq_mpw_wqebbs; |
514 | u8 min_inline_mode; |
515 | unsigned long state; |
516 | unsigned int hw_mtu; |
517 | |
518 | /* control path */ |
519 | struct mlx5_wq_ctrl wq_ctrl; |
520 | struct mlx5e_channel *channel; |
521 | } ____cacheline_aligned_in_smp; |
522 | |
523 | struct mlx5e_xdp_buff { |
524 | struct xdp_buff xdp; |
525 | struct mlx5_cqe64 *cqe; |
526 | struct mlx5e_rq *rq; |
527 | }; |
528 | |
529 | struct mlx5e_ktls_resync_resp; |
530 | |
531 | struct mlx5e_icosq { |
532 | /* data path */ |
533 | u16 cc; |
534 | u16 pc; |
535 | |
536 | struct mlx5_wqe_ctrl_seg *doorbell_cseg; |
537 | struct mlx5e_cq cq; |
538 | |
539 | /* write@xmit, read@completion */ |
540 | struct { |
541 | struct mlx5e_icosq_wqe_info *wqe_info; |
542 | } db; |
543 | |
544 | /* read only */ |
545 | struct mlx5_wq_cyc wq; |
546 | void __iomem *uar_map; |
547 | u32 sqn; |
548 | u16 reserved_room; |
549 | unsigned long state; |
550 | struct mlx5e_ktls_resync_resp *ktls_resync; |
551 | |
552 | /* control path */ |
553 | struct mlx5_wq_ctrl wq_ctrl; |
554 | struct mlx5e_channel *channel; |
555 | |
556 | struct work_struct recover_work; |
557 | } ____cacheline_aligned_in_smp; |
558 | |
559 | struct mlx5e_frag_page { |
560 | struct page *page; |
561 | u16 frags; |
562 | }; |
563 | |
564 | enum mlx5e_wqe_frag_flag { |
565 | MLX5E_WQE_FRAG_LAST_IN_PAGE, |
566 | MLX5E_WQE_FRAG_SKIP_RELEASE, |
567 | }; |
568 | |
569 | struct mlx5e_wqe_frag_info { |
570 | union { |
571 | struct mlx5e_frag_page *frag_page; |
572 | struct xdp_buff **xskp; |
573 | }; |
574 | u32 offset; |
575 | u8 flags; |
576 | }; |
577 | |
578 | union mlx5e_alloc_units { |
579 | DECLARE_FLEX_ARRAY(struct mlx5e_frag_page, frag_pages); |
580 | DECLARE_FLEX_ARRAY(struct page *, pages); |
581 | DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs); |
582 | }; |
583 | |
584 | struct mlx5e_mpw_info { |
585 | u16 consumed_strides; |
586 | DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE); |
587 | struct mlx5e_frag_page linear_page; |
588 | union mlx5e_alloc_units alloc_units; |
589 | }; |
590 | |
591 | #define MLX5E_MAX_RX_FRAGS 4 |
592 | |
593 | struct mlx5e_rq; |
594 | typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); |
595 | typedef struct sk_buff * |
596 | (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, |
597 | struct mlx5_cqe64 *cqe, u16 cqe_bcnt, |
598 | u32 head_offset, u32 page_idx); |
599 | typedef struct sk_buff * |
600 | (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, |
601 | struct mlx5_cqe64 *cqe, u32 cqe_bcnt); |
602 | typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); |
603 | typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); |
604 | typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool); |
605 | |
606 | int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk); |
607 | void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params); |
608 | |
609 | enum mlx5e_rq_flag { |
610 | MLX5E_RQ_FLAG_XDP_XMIT, |
611 | MLX5E_RQ_FLAG_XDP_REDIRECT, |
612 | }; |
613 | |
614 | struct mlx5e_rq_frag_info { |
615 | int frag_size; |
616 | int frag_stride; |
617 | }; |
618 | |
619 | struct mlx5e_rq_frags_info { |
620 | struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS]; |
621 | u8 num_frags; |
622 | u8 log_num_frags; |
623 | u16 wqe_bulk; |
624 | u16 refill_unit; |
625 | u8 wqe_index_mask; |
626 | }; |
627 | |
628 | struct mlx5e_dma_info { |
629 | dma_addr_t addr; |
630 | union { |
631 | struct mlx5e_frag_page *frag_page; |
632 | struct page *page; |
633 | }; |
634 | }; |
635 | |
636 | struct mlx5e_shampo_hd { |
637 | u32 mkey; |
638 | struct mlx5e_frag_page *pages; |
639 | u32 hd_per_wq; |
640 | u16 hd_per_wqe; |
641 | u16 pages_per_wq; |
642 | unsigned long *bitmap; |
643 | u16 pi; |
644 | u16 ci; |
645 | __be32 key; |
646 | }; |
647 | |
648 | struct mlx5e_hw_gro_data { |
649 | struct sk_buff *skb; |
650 | struct flow_keys fk; |
651 | int second_ip_id; |
652 | }; |
653 | |
654 | enum mlx5e_mpwrq_umr_mode { |
655 | MLX5E_MPWRQ_UMR_MODE_ALIGNED, |
656 | MLX5E_MPWRQ_UMR_MODE_UNALIGNED, |
657 | MLX5E_MPWRQ_UMR_MODE_OVERSIZED, |
658 | MLX5E_MPWRQ_UMR_MODE_TRIPLE, |
659 | }; |
660 | |
661 | struct mlx5e_rq { |
662 | /* data path */ |
663 | union { |
664 | struct { |
665 | struct mlx5_wq_cyc wq; |
666 | struct mlx5e_wqe_frag_info *frags; |
667 | union mlx5e_alloc_units *alloc_units; |
668 | struct mlx5e_rq_frags_info info; |
669 | mlx5e_fp_skb_from_cqe skb_from_cqe; |
670 | } wqe; |
671 | struct { |
672 | struct mlx5_wq_ll wq; |
673 | struct mlx5e_umr_wqe_hdr umr_wqe; |
674 | struct mlx5e_mpw_info *info; |
675 | mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; |
676 | __be32 umr_mkey_be; |
677 | u16 num_strides; |
678 | u16 actual_wq_head; |
679 | u8 log_stride_sz; |
680 | u8 umr_in_progress; |
681 | u8 umr_last_bulk; |
682 | u8 umr_completed; |
683 | u8 min_wqe_bulk; |
684 | u8 page_shift; |
685 | u8 pages_per_wqe; |
686 | u8 umr_wqebbs; |
687 | u8 mtts_per_wqe; |
688 | u8 umr_mode; |
689 | struct mlx5e_shampo_hd *shampo; |
690 | } mpwqe; |
691 | }; |
692 | struct { |
693 | u16 headroom; |
694 | u32 frame0_sz; |
695 | u8 map_dir; /* dma map direction */ |
696 | } buff; |
697 | |
698 | struct device *pdev; |
699 | struct net_device *netdev; |
700 | struct mlx5e_rq_stats *stats; |
701 | struct mlx5e_cq cq; |
702 | struct mlx5e_cq_decomp cqd; |
703 | struct hwtstamp_config *tstamp; |
704 | struct mlx5_clock *clock; |
705 | struct mlx5e_icosq *icosq; |
706 | struct mlx5e_priv *priv; |
707 | |
708 | struct mlx5e_hw_gro_data *hw_gro_data; |
709 | |
710 | mlx5e_fp_handle_rx_cqe handle_rx_cqe; |
711 | mlx5e_fp_post_rx_wqes post_wqes; |
712 | mlx5e_fp_dealloc_wqe dealloc_wqe; |
713 | |
714 | unsigned long state; |
715 | int ix; |
716 | unsigned int hw_mtu; |
717 | |
718 | struct dim *dim; /* Dynamic Interrupt Moderation */ |
719 | |
720 | /* XDP */ |
721 | struct bpf_prog __rcu *xdp_prog; |
722 | struct mlx5e_xdpsq *xdpsq; |
723 | DECLARE_BITMAP(flags, 8); |
724 | struct page_pool *page_pool; |
725 | struct mlx5e_xdp_buff mxbuf; |
726 | |
727 | /* AF_XDP zero-copy */ |
728 | struct xsk_buff_pool *xsk_pool; |
729 | |
730 | struct work_struct recover_work; |
731 | |
732 | /* control */ |
733 | struct mlx5_wq_ctrl wq_ctrl; |
734 | __be32 mkey_be; |
735 | u8 wq_type; |
736 | u32 rqn; |
737 | struct mlx5_core_dev *mdev; |
738 | struct mlx5e_channel *channel; |
739 | struct mlx5e_dma_info wqe_overflow; |
740 | |
741 | /* XDP read-mostly */ |
742 | struct xdp_rxq_info xdp_rxq; |
743 | cqe_ts_to_ns ptp_cyc2time; |
744 | } ____cacheline_aligned_in_smp; |
745 | |
746 | enum mlx5e_channel_state { |
747 | MLX5E_CHANNEL_STATE_XSK, |
748 | MLX5E_CHANNEL_NUM_STATES |
749 | }; |
750 | |
751 | struct mlx5e_channel { |
752 | /* data path */ |
753 | struct mlx5e_rq rq; |
754 | struct mlx5e_xdpsq rq_xdpsq; |
755 | struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC]; |
756 | struct mlx5e_icosq icosq; /* internal control operations */ |
757 | struct mlx5e_txqsq __rcu * __rcu *qos_sqs; |
758 | bool xdp; |
759 | struct napi_struct napi; |
760 | struct device *pdev; |
761 | struct net_device *netdev; |
762 | __be32 mkey_be; |
763 | u16 qos_sqs_size; |
764 | u8 num_tc; |
765 | u8 lag_port; |
766 | |
767 | /* XDP_REDIRECT */ |
768 | struct mlx5e_xdpsq *xdpsq; |
769 | |
770 | /* AF_XDP zero-copy */ |
771 | struct mlx5e_rq xskrq; |
772 | struct mlx5e_xdpsq xsksq; |
773 | |
774 | /* Async ICOSQ */ |
775 | struct mlx5e_icosq async_icosq; |
776 | /* async_icosq can be accessed from any CPU - the spinlock protects it. */ |
777 | spinlock_t async_icosq_lock; |
778 | |
779 | /* data path - accessed per napi poll */ |
780 | const struct cpumask *aff_mask; |
781 | struct mlx5e_ch_stats *stats; |
782 | |
783 | /* control */ |
784 | struct mlx5e_priv *priv; |
785 | struct mlx5_core_dev *mdev; |
786 | struct hwtstamp_config *tstamp; |
787 | DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); |
788 | int ix; |
789 | int vec_ix; |
790 | int sd_ix; |
791 | int cpu; |
792 | /* Sync between icosq recovery and XSK enable/disable. */ |
793 | struct mutex icosq_recovery_lock; |
794 | |
795 | /* coalescing configuration */ |
796 | struct dim_cq_moder rx_cq_moder; |
797 | struct dim_cq_moder tx_cq_moder; |
798 | }; |
799 | |
800 | struct mlx5e_ptp; |
801 | |
802 | struct mlx5e_channels { |
803 | struct mlx5e_channel **c; |
804 | struct mlx5e_ptp *ptp; |
805 | unsigned int num; |
806 | struct mlx5e_params params; |
807 | }; |
808 | |
809 | struct mlx5e_channel_stats { |
810 | struct mlx5e_ch_stats ch; |
811 | struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC]; |
812 | struct mlx5e_rq_stats rq; |
813 | struct mlx5e_rq_stats xskrq; |
814 | struct mlx5e_xdpsq_stats rq_xdpsq; |
815 | struct mlx5e_xdpsq_stats xdpsq; |
816 | struct mlx5e_xdpsq_stats xsksq; |
817 | } ____cacheline_aligned_in_smp; |
818 | |
819 | struct mlx5e_ptp_stats { |
820 | struct mlx5e_ch_stats ch; |
821 | struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC]; |
822 | struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC]; |
823 | struct mlx5e_rq_stats rq; |
824 | } ____cacheline_aligned_in_smp; |
825 | |
826 | enum { |
827 | MLX5E_STATE_OPENED, |
828 | MLX5E_STATE_DESTROYING, |
829 | MLX5E_STATE_XDP_TX_ENABLED, |
830 | MLX5E_STATE_XDP_ACTIVE, |
831 | MLX5E_STATE_CHANNELS_ACTIVE, |
832 | }; |
833 | |
834 | struct mlx5e_modify_sq_param { |
835 | int curr_state; |
836 | int next_state; |
837 | int rl_update; |
838 | int rl_index; |
839 | bool qos_update; |
840 | u16 qos_queue_group_id; |
841 | }; |
842 | |
843 | #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) |
844 | struct mlx5e_hv_vhca_stats_agent { |
845 | struct mlx5_hv_vhca_agent *agent; |
846 | struct delayed_work work; |
847 | u16 delay; |
848 | void *buf; |
849 | }; |
850 | #endif |
851 | |
852 | struct mlx5e_xsk { |
853 | /* XSK buffer pools are stored separately from channels, |
854 | * because we don't want to lose them when channels are |
	 * recreated. The kernel also stores buffer pools, but it doesn't
	 * distinguish between zero-copy and non-zero-copy UMEMs, so we
	 * rely on our own mechanism.
858 | */ |
859 | struct xsk_buff_pool **pools; |
860 | u16 refcnt; |
861 | bool ever_used; |
862 | }; |
863 | |
864 | /* Temporary storage for variables that are allocated when struct mlx5e_priv is |
 * initialized, and used where we can't allocate them because those functions
866 | * must not fail. Use with care and make sure the same variable is not used |
867 | * simultaneously by multiple users. |
868 | */ |
869 | struct mlx5e_scratchpad { |
870 | cpumask_var_t cpumask; |
871 | }; |
872 | |
873 | struct mlx5e_trap; |
874 | struct mlx5e_htb; |
875 | |
876 | struct mlx5e_priv { |
877 | /* priv data path fields - start */ |
878 | struct mlx5e_selq selq; |
879 | struct mlx5e_txqsq **txq2sq; |
880 | struct mlx5e_sq_stats **txq2sq_stats; |
881 | |
882 | #ifdef CONFIG_MLX5_CORE_EN_DCB |
883 | struct mlx5e_dcbx_dp dcbx_dp; |
884 | #endif |
885 | /* priv data path fields - end */ |
886 | |
887 | unsigned long state; |
888 | struct mutex state_lock; /* Protects Interface state */ |
889 | struct mlx5e_rq drop_rq; |
890 | |
891 | struct mlx5e_channels channels; |
892 | struct mlx5e_rx_res *rx_res; |
893 | u32 *tx_rates; |
894 | |
895 | struct mlx5e_flow_steering *fs; |
896 | |
897 | struct workqueue_struct *wq; |
898 | struct work_struct update_carrier_work; |
899 | struct work_struct set_rx_mode_work; |
900 | struct work_struct tx_timeout_work; |
901 | struct work_struct update_stats_work; |
902 | struct work_struct monitor_counters_work; |
903 | struct mlx5_nb monitor_counters_nb; |
904 | |
905 | struct mlx5_core_dev *mdev; |
906 | struct net_device *netdev; |
907 | struct mlx5e_trap *en_trap; |
908 | struct mlx5e_stats stats; |
909 | struct mlx5e_channel_stats **channel_stats; |
910 | struct mlx5e_channel_stats trap_stats; |
911 | struct mlx5e_ptp_stats ptp_stats; |
912 | struct mlx5e_sq_stats **htb_qos_sq_stats; |
913 | u16 htb_max_qos_sqs; |
914 | u16 stats_nch; |
915 | u16 max_nch; |
916 | u8 max_opened_tc; |
917 | bool tx_ptp_opened; |
918 | bool rx_ptp_opened; |
919 | struct hwtstamp_config tstamp; |
920 | u16 q_counter[MLX5_SD_MAX_GROUP_SZ]; |
921 | u16 drop_rq_q_counter; |
922 | struct notifier_block events_nb; |
923 | struct notifier_block blocking_events_nb; |
924 | |
925 | struct udp_tunnel_nic_info nic_info; |
926 | #ifdef CONFIG_MLX5_CORE_EN_DCB |
927 | struct mlx5e_dcbx dcbx; |
928 | #endif |
929 | |
930 | const struct mlx5e_profile *profile; |
931 | void *ppriv; |
932 | #ifdef CONFIG_MLX5_MACSEC |
933 | struct mlx5e_macsec *macsec; |
934 | #endif |
935 | #ifdef CONFIG_MLX5_EN_IPSEC |
936 | struct mlx5e_ipsec *ipsec; |
937 | #endif |
938 | #ifdef CONFIG_MLX5_EN_TLS |
939 | struct mlx5e_tls *tls; |
940 | #endif |
941 | struct devlink_health_reporter *tx_reporter; |
942 | struct devlink_health_reporter *rx_reporter; |
943 | struct mlx5e_xsk xsk; |
944 | #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) |
945 | struct mlx5e_hv_vhca_stats_agent stats_agent; |
946 | #endif |
947 | struct mlx5e_scratchpad scratchpad; |
948 | struct mlx5e_htb *htb; |
949 | struct mlx5e_mqprio_rl *mqprio_rl; |
950 | struct dentry *dfs_root; |
951 | struct mlx5_devcom_comp_dev *devcom; |
952 | }; |
953 | |
954 | struct mlx5e_dev { |
955 | struct mlx5e_priv *priv; |
956 | struct devlink_port dl_port; |
957 | }; |
958 | |
959 | struct mlx5e_rx_handlers { |
960 | mlx5e_fp_handle_rx_cqe handle_rx_cqe; |
961 | mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; |
962 | mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo; |
963 | }; |
964 | |
965 | extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic; |
966 | |
967 | enum mlx5e_profile_feature { |
968 | MLX5E_PROFILE_FEATURE_PTP_RX, |
969 | MLX5E_PROFILE_FEATURE_PTP_TX, |
970 | MLX5E_PROFILE_FEATURE_QOS_HTB, |
971 | MLX5E_PROFILE_FEATURE_FS_VLAN, |
972 | MLX5E_PROFILE_FEATURE_FS_TC, |
973 | }; |
974 | |
975 | struct mlx5e_profile { |
976 | int (*init)(struct mlx5_core_dev *mdev, |
977 | struct net_device *netdev); |
978 | void (*cleanup)(struct mlx5e_priv *priv); |
979 | int (*init_rx)(struct mlx5e_priv *priv); |
980 | void (*cleanup_rx)(struct mlx5e_priv *priv); |
981 | int (*init_tx)(struct mlx5e_priv *priv); |
982 | void (*cleanup_tx)(struct mlx5e_priv *priv); |
983 | void (*enable)(struct mlx5e_priv *priv); |
984 | void (*disable)(struct mlx5e_priv *priv); |
985 | int (*update_rx)(struct mlx5e_priv *priv); |
986 | void (*update_stats)(struct mlx5e_priv *priv); |
987 | void (*update_carrier)(struct mlx5e_priv *priv); |
988 | int (*max_nch_limit)(struct mlx5_core_dev *mdev); |
989 | u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, |
990 | u8 lag_port, u8 tc); |
991 | unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); |
992 | mlx5e_stats_grp_t *stats_grps; |
993 | const struct mlx5e_rx_handlers *rx_handlers; |
994 | int max_tc; |
995 | u32 features; |
996 | }; |
997 | |
998 | u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev, |
999 | struct mlx5e_priv *priv, |
1000 | const struct mlx5e_profile *profile, |
1001 | u8 lag_port, u8 tc); |
1002 | |
1003 | #define mlx5e_profile_feature_cap(profile, feature) \ |
1004 | ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature)) |
1005 | |
1006 | void mlx5e_build_ptys2ethtool_map(void); |
1007 | |
1008 | bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, |
1009 | enum mlx5e_mpwrq_umr_mode umr_mode); |
1010 | |
1011 | void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len); |
1012 | void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq); |
1013 | void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); |
1014 | void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); |
1015 | |
1016 | int mlx5e_self_test_num(struct mlx5e_priv *priv); |
1017 | int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); |
1018 | void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, |
1019 | u64 *buf); |
1020 | void mlx5e_set_rx_mode_work(struct work_struct *work); |
1021 | |
1022 | int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); |
1023 | int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); |
1024 | int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter); |
1025 | |
1026 | int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, |
1027 | u16 vid); |
1028 | int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, |
1029 | u16 vid); |
1030 | void mlx5e_timestamp_init(struct mlx5e_priv *priv); |
1031 | |
1032 | struct mlx5e_xsk_param; |
1033 | |
1034 | struct mlx5e_rq_param; |
1035 | int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, |
1036 | struct mlx5e_xsk_param *xsk, int node, u16 q_counter, |
1037 | struct mlx5e_rq *rq); |
1038 | #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ |
1039 | int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time); |
1040 | void mlx5e_close_rq(struct mlx5e_rq *rq); |
1041 | int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter); |
1042 | void mlx5e_destroy_rq(struct mlx5e_rq *rq); |
1043 | |
1044 | bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, |
1045 | bool dim_enabled); |
1046 | bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, |
1047 | bool dim_enabled, bool keep_dim_state); |
1048 | |
1049 | struct mlx5e_sq_param; |
1050 | int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, |
1051 | struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, |
1052 | struct mlx5e_xdpsq *sq, bool is_redirect); |
1053 | void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq); |
1054 | |
1055 | struct mlx5e_create_cq_param { |
1056 | struct net_device *netdev; |
1057 | struct workqueue_struct *wq; |
1058 | struct napi_struct *napi; |
1059 | struct mlx5e_ch_stats *ch_stats; |
1060 | int node; |
1061 | int ix; |
1062 | }; |
1063 | |
1064 | struct mlx5e_cq_param; |
1065 | int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, |
1066 | struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, |
1067 | struct mlx5e_cq *cq); |
1068 | void mlx5e_close_cq(struct mlx5e_cq *cq); |
1069 | int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, |
1070 | u8 cq_period_mode); |
1071 | int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, |
1072 | u16 cq_period, u16 cq_max_count, u8 cq_period_mode); |
1073 | |
1074 | int mlx5e_open_locked(struct net_device *netdev); |
1075 | int mlx5e_close_locked(struct net_device *netdev); |
1076 | |
1077 | void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c); |
1078 | void mlx5e_trigger_napi_sched(struct napi_struct *napi); |
1079 | |
1080 | int mlx5e_open_channels(struct mlx5e_priv *priv, |
1081 | struct mlx5e_channels *chs); |
1082 | void mlx5e_close_channels(struct mlx5e_channels *chs); |
1083 | |
1084 | /* Function pointer to be used to modify HW or kernel settings while |
1085 | * switching channels |
1086 | */ |
1087 | typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context); |
1088 | #define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \ |
1089 | int fn##_ctx(struct mlx5e_priv *priv, void *context) \ |
1090 | { \ |
1091 | return fn(priv); \ |
1092 | } |
1093 | int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); |
1094 | int mlx5e_safe_switch_params(struct mlx5e_priv *priv, |
1095 | struct mlx5e_params *new_params, |
1096 | mlx5e_fp_preactivate preactivate, |
1097 | void *context, bool reset); |
1098 | int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); |
1099 | int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); |
1100 | int mlx5e_update_tc_and_tx_queues_ctx(struct mlx5e_priv *priv, void *context); |
1101 | void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); |
1102 | void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); |
1103 | int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx); |
1104 | |
1105 | int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state); |
1106 | void mlx5e_activate_rq(struct mlx5e_rq *rq); |
1107 | void mlx5e_deactivate_rq(struct mlx5e_rq *rq); |
1108 | void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); |
1109 | void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); |
1110 | |
1111 | int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, |
1112 | struct mlx5e_modify_sq_param *p); |
1113 | int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, |
1114 | struct mlx5e_params *params, struct mlx5e_sq_param *param, |
1115 | struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, |
1116 | struct mlx5e_sq_stats *sq_stats); |
1117 | void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); |
1118 | void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); |
1119 | void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); |
1120 | void mlx5e_tx_disable_queue(struct netdev_queue *txq); |
1121 | int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa); |
1122 | void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq); |
1123 | struct mlx5e_create_sq_param; |
1124 | int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, |
1125 | struct mlx5e_sq_param *param, |
1126 | struct mlx5e_create_sq_param *csp, |
1127 | u16 qos_queue_group_id, |
1128 | u32 *sqn); |
1129 | void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); |
1130 | void mlx5e_close_txqsq(struct mlx5e_txqsq *sq); |
1131 | |
1132 | bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, |
1133 | bool dim_enabled); |
1134 | bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, |
1135 | bool dim_enabled, bool keep_dim_state); |
1136 | |
1137 | static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) |
1138 | { |
1139 | return MLX5_CAP_ETH(mdev, swp) && |
1140 | MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso); |
1141 | } |
1142 | |
1143 | extern const struct ethtool_ops mlx5e_ethtool_ops; |
1144 | |
1145 | int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); |
1146 | int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises); |
1147 | void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); |
1148 | int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, |
1149 | bool enable_mc_lb); |
1150 | void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc); |
1151 | |
1152 | /* common netdev helpers */ |
1153 | void mlx5e_create_q_counters(struct mlx5e_priv *priv); |
1154 | void mlx5e_destroy_q_counters(struct mlx5e_priv *priv); |
1155 | int mlx5e_open_drop_rq(struct mlx5e_priv *priv, |
1156 | struct mlx5e_rq *drop_rq); |
1157 | void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); |
1158 | |
1159 | int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); |
1160 | void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); |
1161 | |
1162 | void mlx5e_update_carrier(struct mlx5e_priv *priv); |
1163 | int mlx5e_close(struct net_device *netdev); |
1164 | int mlx5e_open(struct net_device *netdev); |
1165 | |
1166 | void mlx5e_queue_update_stats(struct mlx5e_priv *priv); |
1167 | |
1168 | int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); |
1169 | int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context); |
1170 | int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, |
1171 | mlx5e_fp_preactivate preactivate); |
1172 | void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv); |
1173 | |
1174 | /* ethtool helpers */ |
1175 | void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, |
1176 | struct ethtool_drvinfo *drvinfo); |
1177 | void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, |
1178 | u32 stringset, u8 *data); |
1179 | int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset); |
1180 | void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, |
1181 | struct ethtool_stats *stats, u64 *data); |
1182 | void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, |
1183 | struct ethtool_ringparam *param, |
1184 | struct kernel_ethtool_ringparam *kernel_param); |
1185 | int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, |
1186 | struct ethtool_ringparam *param, |
1187 | struct netlink_ext_ack *extack); |
1188 | void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, |
1189 | struct ethtool_channels *ch); |
1190 | int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, |
1191 | struct ethtool_channels *ch); |
1192 | int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, |
1193 | struct ethtool_coalesce *coal, |
1194 | struct kernel_ethtool_coalesce *kernel_coal, |
1195 | struct netlink_ext_ack *extack); |
1196 | int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, |
1197 | struct ethtool_coalesce *coal, |
1198 | struct kernel_ethtool_coalesce *kernel_coal, |
1199 | struct netlink_ext_ack *extack); |
1200 | int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue, |
1201 | struct ethtool_coalesce *coal); |
1202 | int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, |
1203 | struct ethtool_coalesce *coal); |
1204 | u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); |
1205 | u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); |
1206 | int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, |
1207 | struct kernel_ethtool_ts_info *info); |
1208 | int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, |
1209 | struct ethtool_flash *flash); |
1210 | |
1211 | /* mlx5e generic netdev management API */ |
1212 | static inline bool |
1213 | mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev) |
1214 | { |
1215 | return !is_kdump_kernel() && |
1216 | MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe); |
1217 | } |
1218 | |
1219 | int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev); |
1220 | int mlx5e_priv_init(struct mlx5e_priv *priv, |
1221 | const struct mlx5e_profile *profile, |
1222 | struct net_device *netdev, |
1223 | struct mlx5_core_dev *mdev); |
1224 | void mlx5e_priv_cleanup(struct mlx5e_priv *priv); |
1225 | struct net_device * |
1226 | mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile); |
1227 | int mlx5e_attach_netdev(struct mlx5e_priv *priv); |
1228 | void mlx5e_detach_netdev(struct mlx5e_priv *priv); |
1229 | void mlx5e_destroy_netdev(struct mlx5e_priv *priv); |
1230 | int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, |
1231 | const struct mlx5e_profile *new_profile, void *new_ppriv); |
1232 | void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); |
1233 | void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); |
1234 | void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); |
1235 | |
1236 | void mlx5e_set_xdp_feature(struct net_device *netdev); |
1237 | netdev_features_t mlx5e_features_check(struct sk_buff *skb, |
1238 | struct net_device *netdev, |
1239 | netdev_features_t features); |
1240 | int mlx5e_set_features(struct net_device *netdev, netdev_features_t features); |
1241 | #ifdef CONFIG_MLX5_ESWITCH |
1242 | int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac); |
1243 | int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate); |
1244 | int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); |
1245 | int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); |
1246 | #endif |
1248 | #endif /* __MLX5_EN_H__ */ |
1249 | |