1 | /* |
2 | * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | #ifndef __MLX5_EN_H__ |
33 | #define __MLX5_EN_H__ |
34 | |
35 | #include <linux/if_vlan.h> |
36 | #include <linux/etherdevice.h> |
37 | #include <linux/timecounter.h> |
38 | #include <linux/net_tstamp.h> |
39 | #include <linux/crash_dump.h> |
40 | #include <linux/mlx5/driver.h> |
41 | #include <linux/mlx5/qp.h> |
42 | #include <linux/mlx5/cq.h> |
43 | #include <linux/mlx5/port.h> |
44 | #include <linux/mlx5/vport.h> |
45 | #include <linux/mlx5/transobj.h> |
46 | #include <linux/mlx5/fs.h> |
47 | #include <linux/rhashtable.h> |
48 | #include <net/udp_tunnel.h> |
49 | #include <net/switchdev.h> |
50 | #include <net/xdp.h> |
51 | #include <linux/dim.h> |
52 | #include <linux/bits.h> |
53 | #include "wq.h" |
54 | #include "mlx5_core.h" |
55 | #include "en_stats.h" |
56 | #include "en/dcbnl.h" |
57 | #include "en/fs.h" |
58 | #include "en/qos.h" |
59 | #include "lib/hv_vhca.h" |
60 | #include "lib/clock.h" |
61 | #include "en/rx_res.h" |
62 | #include "en/selq.h" |
63 | #include "lib/sd.h" |
64 | |
65 | extern const struct net_device_ops mlx5e_netdev_ops; |
66 | struct page_pool; |
67 | |
68 | #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) |
69 | #define MLX5E_METADATA_ETHER_LEN 8 |
70 | |
71 | #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) |
72 | |
73 | #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) |
74 | #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) |
75 | |
76 | #define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE |
77 | |
78 | #define MLX5_RX_HEADROOM NET_SKB_PAD |
79 | #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ |
80 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
81 | |
#define MLX5E_RX_MAX_HEAD (256)
/* SHAMPO (HW GRO) header-split geometry: the largest header entry is
 * 2^9 = 512 bytes, so one page holds PAGE_SIZE >> 9 header entries.
 * NOTE(review): both macro names on the next two lines were missing in
 * the source (`#define (9)` does not preprocess); names reconstructed
 * from the self-reference below and upstream mlx5 naming — confirm.
 */
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
88 | |
89 | #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ |
90 | (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ |
91 | #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \ |
92 | max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) |
93 | #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ |
94 | MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD)) |
95 | |
96 | #define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 |
97 | |
98 | /* Keep in sync with mlx5e_mpwrq_log_wqe_sz. |
99 | * These are theoretical maximums, which can be further restricted by |
100 | * capabilities. These values are used for static resource allocations and |
101 | * sanity checks. |
102 | * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE |
103 | * size actually used at runtime, but it's not a problem when calculating static |
104 | * array sizes. |
105 | */ |
106 | #define MLX5_UMR_MAX_FLEX_SPACE \ |
107 | (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \ |
108 | MLX5_UMR_FLEX_ALIGNMENT)) |
109 | #define MLX5_MPWRQ_MAX_PAGES_PER_WQE \ |
110 | rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt)) |
111 | |
112 | #define MLX5E_MAX_RQ_NUM_MTTS \ |
113 | (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */ |
114 | #define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */ |
115 | #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) |
116 | |
117 | #define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM)) |
118 | #define MLX5E_LOG_MAX_RX_WQE_BULK \ |
119 | (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ))) |
120 | |
121 | #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 |
122 | #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa |
123 | #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd |
124 | |
125 | #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK) |
126 | #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa |
127 | #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd |
128 | |
129 | #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 |
130 | |
131 | #define MLX5E_DEFAULT_LRO_TIMEOUT 32 |
132 | #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 |
133 | |
134 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 |
135 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 |
136 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 |
137 | #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 |
138 | #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10 |
139 | #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 |
140 | #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 |
141 | #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 |
142 | |
143 | #define MLX5E_MIN_NUM_CHANNELS 0x1 |
144 | #define MLX5E_MAX_NUM_CHANNELS 256 |
145 | #define MLX5E_TX_CQ_POLL_BUDGET 128 |
146 | #define MLX5E_TX_XSK_POLL_BUDGET 64 |
147 | #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ |
148 | |
149 | #define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\ |
150 | (sizeof(struct mlx5e_umr_wqe) +\ |
151 | (sizeof(struct mlx5_klm) * (sgl_len))) |
152 | |
153 | #define MLX5E_KLM_UMR_WQEBBS(klm_entries) \ |
154 | (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB)) |
155 | |
156 | #define MLX5E_KLM_UMR_DS_CNT(klm_entries)\ |
157 | (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS)) |
158 | |
159 | #define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ |
160 | (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm)) |
161 | |
162 | #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ |
163 | ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT) |
164 | |
165 | #define MLX5E_MAX_KLM_PER_WQE(mdev) \ |
166 | MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) |
167 | |
168 | #define mlx5e_state_dereference(priv, p) \ |
169 | rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) |
170 | |
/* Events exchanged between paired devices over the devcom channel.
 * NOTE(review): MPV presumably stands for multi-port vhca — confirm
 * against the devcom event handlers.
 */
enum mlx5e_devcom_events {
	MPV_DEVCOM_MASTER_UP,
	MPV_DEVCOM_MASTER_DOWN,
	MPV_DEVCOM_IPSEC_MASTER_UP,
	MPV_DEVCOM_IPSEC_MASTER_DOWN,
};
177 | |
178 | static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) |
179 | { |
180 | if (mlx5_lag_is_lacp_owner(dev: mdev)) |
181 | return 1; |
182 | |
183 | return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS); |
184 | } |
185 | |
186 | static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) |
187 | { |
188 | switch (wq_type) { |
189 | case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
190 | return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW, |
191 | wq_size / 2); |
192 | default: |
193 | return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES, |
194 | wq_size / 2); |
195 | } |
196 | } |
197 | |
198 | /* Use this function to get max num channels (rxqs/txqs) only to create netdev */ |
199 | static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) |
200 | { |
201 | return is_kdump_kernel() ? |
202 | MLX5E_MIN_NUM_CHANNELS : |
203 | min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS, |
204 | (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size))); |
205 | } |
206 | |
207 | /* The maximum WQE size can be retrieved by max_wqe_sz_sq in |
208 | * bytes units. Driver hardens the limitation to 1KB (16 |
209 | * WQEBBs), unless firmware capability is stricter. |
210 | */ |
211 | static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev) |
212 | { |
213 | BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX); |
214 | |
215 | return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS, |
216 | MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB); |
217 | } |
218 | |
219 | static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev) |
220 | { |
221 | /* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS. |
222 | * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16, |
223 | * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64) |
224 | * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower |
225 | * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be |
226 | * cache-aligned. |
227 | */ |
228 | u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev); |
229 | |
230 | wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1); |
231 | #if L1_CACHE_BYTES >= 128 |
232 | wqebbs = ALIGN_DOWN(wqebbs, 2); |
233 | #endif |
234 | return wqebbs; |
235 | } |
236 | |
/* TX WQE layout: ctrl + eth segments followed by a variable number of
 * scatter/gather data segments.
 */
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg eth;
	struct mlx5_wqe_data_seg data[];
};
242 | |
/* RX WQE for the linked-list (striding) RQ type. */
struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg data[];
};
247 | |
/* RX WQE for the cyclic (legacy) RQ type: data segments only. */
struct mlx5e_rx_wqe_cyc {
	DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
251 | |
/* UMR WQE: ctrl + UMR-ctrl + mkey segments, followed by inline
 * translation entries whose flavor (MTT/KLM/KSM) depends on the UMR
 * mode in use (see enum mlx5e_mpwrq_umr_mode below).
 */
struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg mkc;
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
		DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
		DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
	};
};
262 | |
/* Bit indices into mlx5e_params.pflags; manipulated via
 * MLX5E_SET_PFLAG()/MLX5E_GET_PFLAG() below.
 */
enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_PFLAG_SKB_TX_MPWQE,
	MLX5E_PFLAG_TX_PORT_TS,
	MLX5E_NUM_PFLAGS, /* Keep last */
};
274 | |
275 | #define MLX5E_SET_PFLAG(params, pflag, enable) \ |
276 | do { \ |
277 | if (enable) \ |
278 | (params)->pflags |= BIT(pflag); \ |
279 | else \ |
280 | (params)->pflags &= ~(BIT(pflag)); \ |
281 | } while (0) |
282 | |
283 | #define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) |
284 | |
/* RX packet aggregation scheme: none, LRO, or SHAMPO (HW GRO). */
enum packet_merge {
	MLX5E_PACKET_MERGE_NONE,
	MLX5E_PACKET_MERGE_LRO,
	MLX5E_PACKET_MERGE_SHAMPO,
};
290 | |
/* Packet merge (LRO/SHAMPO) configuration. */
struct mlx5e_packet_merge_param {
	enum packet_merge type;
	u32 timeout;
	struct {
		u8 match_criteria_type;
		u8 alignment_granularity;
	} shampo;
};
299 | |
/* Software configuration of the netdev: queue sizes/types, TC/mqprio
 * mapping, interrupt moderation and offload toggles. Channels are
 * (re)created from a copy of this struct.
 */
struct mlx5e_params {
	u8 log_sq_size;
	u8 rq_wq_type;
	u8 log_rq_mtu_frames;
	u16 num_channels;
	struct {
		u16 mode;
		u8 num_tc;
		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
		struct {
			u64 max_rate[TC_MAX_QUEUE];
			u32 hw_id[TC_MAX_QUEUE];
		} channel;
	} mqprio;
	bool rx_cqe_compress_def;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	struct mlx5e_packet_merge_param packet_merge;
	u8 tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 pflags; /* MLX5E_PFLAG_* bits, see MLX5E_{SET,GET}_PFLAG() */
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu; /* L2 overhead; see MLX5E_HW2SW_MTU()/MLX5E_SW2HW_MTU() */
	bool ptp_rx;
	__be32 terminate_lkey_be;
};
331 | |
332 | static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params) |
333 | { |
334 | return params->mqprio.mode == TC_MQPRIO_MODE_DCB ? |
335 | params->mqprio.num_tc : 1; |
336 | } |
337 | |
338 | /* Keep this enum consistent with the corresponding strings array |
339 | * declared in en/reporter_rx.c |
340 | */ |
341 | enum { |
342 | MLX5E_RQ_STATE_ENABLED = 0, |
343 | MLX5E_RQ_STATE_RECOVERING, |
344 | MLX5E_RQ_STATE_DIM, |
345 | MLX5E_RQ_STATE_NO_CSUM_COMPLETE, |
346 | MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ |
347 | MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ |
348 | MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ |
349 | MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */ |
350 | MLX5E_RQ_STATE_XSK, /* set to indicate an xsk rq */ |
351 | MLX5E_NUM_RQ_STATES, /* Must be kept last */ |
352 | }; |
353 | |
/* Completion queue context; fields are grouped by access frequency
 * (per-CQE, per-NAPI-poll, control-only).
 */
struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	u16 event_ctr;
	struct napi_struct *napi;
	struct mlx5_core_cq mcq;
	struct mlx5e_ch_stats *ch_stats;

	/* control */
	struct net_device *netdev;
	struct mlx5_core_dev *mdev;
	struct workqueue_struct *workqueue;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
370 | |
/* State for expanding compressed (title + mini) CQEs into full CQEs. */
struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64 title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8 mini_arr_idx;
	u16 left; /* mini CQEs remaining in the current session */
	u16 wqe_counter;
	bool last_cqe_title;
} ____cacheline_aligned_in_smp;
380 | |
/* DMA mapping flavor of a TX buffer (single mapping vs. page). */
enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};
385 | |
/* Per-buffer DMA unmap record, kept in the SQ's dma_fifo. */
struct mlx5e_sq_dma {
	dma_addr_t addr;
	u32 size;
	enum mlx5e_dma_map_type type;
};
391 | |
/* Keep this enum consistent with the corresponding strings array
 * declared in en/reporter_tx.c
 */
395 | enum { |
396 | MLX5E_SQ_STATE_ENABLED = 0, |
397 | MLX5E_SQ_STATE_MPWQE, |
398 | MLX5E_SQ_STATE_RECOVERING, |
399 | MLX5E_SQ_STATE_IPSEC, |
400 | MLX5E_SQ_STATE_DIM, |
401 | MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, |
402 | MLX5E_SQ_STATE_PENDING_XSK_TX, |
403 | MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, |
404 | MLX5E_SQ_STATE_XDP_MULTIBUF, |
405 | MLX5E_NUM_SQ_STATES, /* Must be kept last */ |
406 | }; |
407 | |
/* State of an open multi-packet WQE TX session (counters inferred from
 * names; verify against the SKB/XDP MPWQE transmit paths).
 */
struct mlx5e_tx_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u32 bytes_count;
	u8 ds_count;
	u8 pkt_count;
	u8 inline_on;
};
416 | |
/* Ring of in-flight skbs awaiting TX completion; *pc/*cc point at the
 * producer/consumer counters, mask handles ring wrap-around.
 */
struct mlx5e_skb_fifo {
	struct sk_buff **fifo;
	u16 *pc;
	u16 *cc;
	u16 mask;
};
423 | |
424 | struct mlx5e_ptpsq; |
425 | |
/* TX send queue backing one netdev txq. Fields are grouped by access
 * pattern: completion path, xmit path (on its own cacheline), read-only
 * data path, and control path.
 */
struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16 cc;
	u16 skb_fifo_cc;
	u32 dma_fifo_cc;
	struct dim dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16 pc ____cacheline_aligned_in_smp;
	u16 skb_fifo_pc;
	u32 dma_fifo_pc;
	struct mlx5e_tx_mpwqe mpwqe;

	struct mlx5e_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	u32 dma_fifo_mask;
	struct mlx5e_sq_stats *stats;
	struct {
		struct mlx5e_sq_dma *dma_fifo;
		struct mlx5e_skb_fifo skb_fifo;
		struct mlx5e_tx_wqe_info *wqe_info;
	} db;
	void __iomem *uar_map;
	struct netdev_queue *txq;
	u32 sqn;
	u16 stop_room; /* WQEBBs kept free so the largest WQE always fits */
	u8 max_sq_mpw_wqebbs;
	u8 min_inline_mode;
	struct device *pdev;
	__be32 mkey_be;
	unsigned long state; /* MLX5E_SQ_STATE_* bits */
	unsigned int hw_mtu;
	struct mlx5_clock *clock;
	struct net_device *netdev;
	struct mlx5_core_dev *mdev;
	struct mlx5e_channel *channel;
	struct mlx5e_priv *priv;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	int ch_ix;
	int txq_ix;
	u32 rate_limit;
	struct work_struct recover_work;
	struct mlx5e_ptpsq *ptpsq;
	cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
477 | |
/* Ring of in-flight XDP descriptors; *pc/*cc point at the producer/
 * consumer counters, mask handles ring wrap-around.
 */
struct mlx5e_xdp_info_fifo {
	union mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};
484 | |
485 | struct mlx5e_xdpsq; |
486 | struct mlx5e_xmit_data; |
487 | struct xsk_tx_metadata; |
488 | typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *); |
489 | typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *, |
490 | struct mlx5e_xmit_data *, |
491 | int, |
492 | struct xsk_tx_metadata *); |
493 | |
/* XDP transmit queue, used for XDP_TX (rq_xdpsq), XDP_REDIRECT (xdpsq)
 * and AF_XDP TX (xsksq); same cacheline grouping as mlx5e_txqsq.
 */
struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32 xdpi_fifo_cc;
	u16 cc;

	/* dirtied @xmit */
	u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16 pc;
	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_tx_mpwqe mpwqe;

	struct mlx5e_cq cq;

	/* read only */
	struct xsk_buff_pool *xsk_pool;
	struct mlx5_wq_cyc wq;
	struct mlx5e_xdpsq_stats *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem *uar_map;
	u32 sqn;
	struct device *pdev;
	__be32 mkey_be;
	u16 stop_room;
	u8 max_sq_mpw_wqebbs;
	u8 min_inline_mode;
	unsigned long state;
	unsigned int hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
533 | |
534 | struct mlx5e_ktls_resync_resp; |
535 | |
/* Internal control operations SQ: carries driver-internal WQEs
 * (e.g. RQ UMR posts, kTLS resync responses) rather than netdev
 * traffic.
 */
struct mlx5e_icosq {
	/* data path */
	u16 cc;
	u16 pc;

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_cq cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;
	u16 reserved_room;
	unsigned long state;
	struct mlx5e_ktls_resync_resp *ktls_resync;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;

	struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
563 | |
/* An RX page plus the number of fragments handed out from it. */
struct mlx5e_frag_page {
	struct page *page;
	u16 frags;
};
568 | |
/* Bit indices for mlx5e_wqe_frag_info.flags. */
enum mlx5e_wqe_frag_flag {
	MLX5E_WQE_FRAG_LAST_IN_PAGE,
	MLX5E_WQE_FRAG_SKIP_RELEASE,
};
573 | |
/* Per-fragment bookkeeping for the legacy RQ; the union holds either a
 * page fragment or an XSK buffer pointer, depending on the RQ mode.
 */
struct mlx5e_wqe_frag_info {
	union {
		struct mlx5e_frag_page *frag_page;
		struct xdp_buff **xskp;
	};
	u32 offset;
	u8 flags; /* MLX5E_WQE_FRAG_* bits */
};
582 | |
/* Overlaid arrays of RX allocation units; the valid member depends on
 * the RQ mode (page fragments, plain pages, or XSK buffers).
 */
union mlx5e_alloc_units {
	DECLARE_FLEX_ARRAY(struct mlx5e_frag_page, frag_pages);
	DECLARE_FLEX_ARRAY(struct page *, pages);
	DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs);
};
588 | |
/* Per-MPWQE bookkeeping for the striding RQ. */
struct mlx5e_mpw_info {
	u16 consumed_strides;
	DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
	struct mlx5e_frag_page linear_page;
	union mlx5e_alloc_units alloc_units;
};
595 | |
596 | #define MLX5E_MAX_RX_FRAGS 4 |
597 | |
598 | struct mlx5e_rq; |
599 | typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); |
600 | typedef struct sk_buff * |
601 | (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, |
602 | struct mlx5_cqe64 *cqe, u16 cqe_bcnt, |
603 | u32 head_offset, u32 page_idx); |
604 | typedef struct sk_buff * |
605 | (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, |
606 | struct mlx5_cqe64 *cqe, u32 cqe_bcnt); |
607 | typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); |
608 | typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); |
609 | typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool); |
610 | |
611 | int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk); |
612 | void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params); |
613 | |
/* Bit indices for the mlx5e_rq.flags bitmap. */
enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};
618 | |
/* Size and stride of one RX fragment within a legacy-RQ WQE. */
struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};
623 | |
/* Fragmentation layout of legacy-RQ WQEs (up to MLX5E_MAX_RX_FRAGS). */
struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u16 wqe_bulk;
	u16 refill_unit;
	u8 wqe_index_mask;
};
632 | |
/* DMA address plus its backing page (or page-fragment record). */
struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct mlx5e_frag_page *frag_page;
		struct page *page;
	};
};
640 | |
/* SHAMPO (HW GRO) header buffer state. NOTE(review): per-field
 * semantics inferred from names — verify against en_rx.c.
 */
struct mlx5e_shampo_hd {
	u32 mkey;
	struct mlx5e_dma_info *info;
	struct mlx5e_frag_page *pages;
	u16 curr_page_index;
	u16 hd_per_wq;
	u16 hd_per_wqe;
	unsigned long *bitmap;
	u16 pi;
	u16 ci;
	__be32 key;
	u64 last_addr;
};
654 | |
/* State for building the current HW-GRO aggregated skb. */
struct mlx5e_hw_gro_data {
	struct sk_buff *skb;
	struct flow_keys fk;
	int second_ip_id;
};
660 | |
/* Translation layout used by MPWRQ UMR registrations; selects which
 * inline entry flavor of struct mlx5e_umr_wqe is used.
 */
enum mlx5e_mpwrq_umr_mode {
	MLX5E_MPWRQ_UMR_MODE_ALIGNED,
	MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
	MLX5E_MPWRQ_UMR_MODE_OVERSIZED,
	MLX5E_MPWRQ_UMR_MODE_TRIPLE,
};
667 | |
/* Receive queue. The leading union holds WQ-type-specific state:
 * .wqe for the cyclic (legacy) RQ, .mpwqe for the striding RQ; the
 * valid member is selected by wq_type.
 */
struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc wq;
			struct mlx5e_wqe_frag_info *frags;
			union mlx5e_alloc_units *alloc_units;
			struct mlx5e_rq_frags_info info;
			mlx5e_fp_skb_from_cqe skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll wq;
			struct mlx5e_umr_wqe umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			__be32 umr_mkey_be;
			u16 num_strides;
			u16 actual_wq_head;
			u8 log_stride_sz;
			u8 umr_in_progress;
			u8 umr_last_bulk;
			u8 umr_completed;
			u8 min_wqe_bulk;
			u8 page_shift;
			u8 pages_per_wqe;
			u8 umr_wqebbs;
			u8 mtts_per_wqe;
			u8 umr_mode; /* enum mlx5e_mpwrq_umr_mode */
			struct mlx5e_shampo_hd *shampo;
		} mpwqe;
	};
	struct {
		u16 headroom;
		u32 frame0_sz;
		u8 map_dir; /* dma map direction */
	} buff;

	struct device *pdev;
	struct net_device *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq cq;
	struct mlx5e_cq_decomp cqd;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;
	struct mlx5e_icosq *icosq;
	struct mlx5e_priv *priv;

	struct mlx5e_hw_gro_data *hw_gro_data;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes post_wqes;
	mlx5e_fp_dealloc_wqe dealloc_wqe;

	unsigned long state; /* MLX5E_RQ_STATE_* bits */
	int ix;
	unsigned int hw_mtu;

	struct dim dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog __rcu *xdp_prog;
	struct mlx5e_xdpsq *xdpsq;
	DECLARE_BITMAP(flags, 8); /* enum mlx5e_rq_flag */
	struct page_pool *page_pool;

	/* AF_XDP zero-copy */
	struct xsk_buff_pool *xsk_pool;

	struct work_struct recover_work;

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
	__be32 mkey_be;
	u8 wq_type;
	u32 rqn;
	struct mlx5_core_dev *mdev;
	struct mlx5e_channel *channel;
	struct mlx5e_dma_info wqe_overflow;

	/* XDP read-mostly */
	struct xdp_rxq_info xdp_rxq;
	cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
751 | |
752 | enum mlx5e_channel_state { |
753 | MLX5E_CHANNEL_STATE_XSK, |
754 | MLX5E_CHANNEL_NUM_STATES |
755 | }; |
756 | |
/* One channel (queue pair set): an RQ, per-TC SQs, internal-control
 * SQs and the XDP/XSK queues, all served by a single NAPI context.
 */
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq rq;
	struct mlx5e_xdpsq rq_xdpsq;
	struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC];
	struct mlx5e_icosq icosq; /* internal control operations */
	struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
	bool xdp;
	struct napi_struct napi;
	struct device *pdev;
	struct net_device *netdev;
	__be32 mkey_be;
	u16 qos_sqs_size;
	u8 num_tc;
	u8 lag_port;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq xskrq;
	struct mlx5e_xdpsq xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t async_icosq_lock;

	/* data path - accessed per napi poll */
	const struct cpumask *aff_mask;
	struct mlx5e_ch_stats *stats;

	/* control */
	struct mlx5e_priv *priv;
	struct mlx5_core_dev *mdev;
	struct hwtstamp_config *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int ix;
	int vec_ix;
	int sd_ix;
	int cpu;
	/* Sync between icosq recovery and XSK enable/disable. */
	struct mutex icosq_recovery_lock;
};
801 | |
802 | struct mlx5e_ptp; |
803 | |
/* Set of active channels plus the params they were created with. */
struct mlx5e_channels {
	struct mlx5e_channel **c;
	struct mlx5e_ptp *ptp;
	unsigned int num;
	struct mlx5e_params params;
};
810 | |
/* Per-channel statistics block, one instance per channel. */
struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
820 | |
/* Statistics for the special PTP channel. */
struct mlx5e_ptp_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
	struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
827 | |
/* Bit indices for priv->state. */
enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
	MLX5E_STATE_CHANNELS_ACTIVE,
};
835 | |
/* Parameters for an SQ state-modify operation (state transition plus
 * optional rate-limit and QoS group updates).
 */
struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
	bool qos_update;
	u16 qos_queue_group_id;
};
844 | |
845 | #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) |
846 | struct mlx5e_hv_vhca_stats_agent { |
847 | struct mlx5_hv_vhca_agent *agent; |
848 | struct delayed_work work; |
849 | u16 delay; |
850 | void *buf; |
851 | }; |
852 | #endif |
853 | |
854 | struct mlx5e_xsk { |
855 | /* XSK buffer pools are stored separately from channels, |
856 | * because we don't want to lose them when channels are |
857 | * recreated. The kernel also stores buffer pool, but it doesn't |
858 | * distinguish between zero-copy and non-zero-copy UMEMs, so |
859 | * rely on our mechanism. |
860 | */ |
861 | struct xsk_buff_pool **pools; |
862 | u16 refcnt; |
863 | bool ever_used; |
864 | }; |
865 | |
866 | /* Temporary storage for variables that are allocated when struct mlx5e_priv is |
867 | * initialized, and used where we can't allocate them because that functions |
868 | * must not fail. Use with care and make sure the same variable is not used |
869 | * simultaneously by multiple users. |
870 | */ |
871 | struct mlx5e_scratchpad { |
872 | cpumask_var_t cpumask; |
873 | }; |
874 | |
875 | struct mlx5e_trap; |
876 | struct mlx5e_htb; |
877 | |
/* Main per-netdev driver private state. */
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_selq selq;
	struct mlx5e_txqsq **txq2sq; /* txq index -> SQ mapping */
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp dcbx_dp;
#endif
	/* priv data path fields - end */

	unsigned long state; /* MLX5E_STATE_* bits */
	struct mutex state_lock; /* Protects Interface state */
	struct mlx5e_rq drop_rq;

	struct mlx5e_channels channels;
	struct mlx5e_rx_res *rx_res;
	u32 *tx_rates;

	struct mlx5e_flow_steering *fs;

	struct workqueue_struct *wq;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	struct work_struct tx_timeout_work;
	struct work_struct update_stats_work;
	struct work_struct monitor_counters_work;
	struct mlx5_nb monitor_counters_nb;

	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_trap *en_trap;
	struct mlx5e_stats stats;
	struct mlx5e_channel_stats **channel_stats;
	struct mlx5e_channel_stats trap_stats;
	struct mlx5e_ptp_stats ptp_stats;
	struct mlx5e_sq_stats **htb_qos_sq_stats;
	u16 htb_max_qos_sqs;
	u16 stats_nch;
	u16 max_nch;
	u8 max_opened_tc;
	bool tx_ptp_opened;
	bool rx_ptp_opened;
	struct hwtstamp_config tstamp;
	u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
	u16 drop_rq_q_counter;
	struct notifier_block events_nb;
	struct notifier_block blocking_events_nb;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx dcbx;
#endif

	const struct mlx5e_profile *profile;
	void *ppriv; /* profile-private data */
#ifdef CONFIG_MLX5_MACSEC
	struct mlx5e_macsec *macsec;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad scratchpad;
	struct mlx5e_htb *htb;
	struct mlx5e_mqprio_rl *mqprio_rl;
	struct dentry *dfs_root;
	struct mlx5_devcom_comp_dev *devcom;
};
953 | |
/* Pairs a netdev priv with its devlink port. */
struct mlx5e_dev {
	struct mlx5e_priv *priv;
	struct devlink_port dl_port;
};
958 | |
/* RX CQE dispatch table: legacy, MPWQE, and MPWQE+SHAMPO handlers. */
struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo;
};
964 | |
965 | extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic; |
966 | |
/* Optional features a profile may advertise in mlx5e_profile.features;
 * tested via mlx5e_profile_feature_cap().
 */
enum mlx5e_profile_feature {
	MLX5E_PROFILE_FEATURE_PTP_RX,
	MLX5E_PROFILE_FEATURE_PTP_TX,
	MLX5E_PROFILE_FEATURE_QOS_HTB,
	MLX5E_PROFILE_FEATURE_FS_VLAN,
	MLX5E_PROFILE_FEATURE_FS_TC,
};
974 | |
/* Callbacks implementing one netdev flavor; invoked through
 * priv->profile during init/teardown and stats collection.
 */
struct mlx5e_profile {
	int (*init)(struct mlx5_core_dev *mdev,
		    struct net_device *netdev);
	void (*cleanup)(struct mlx5e_priv *priv);
	int (*init_rx)(struct mlx5e_priv *priv);
	void (*cleanup_rx)(struct mlx5e_priv *priv);
	int (*init_tx)(struct mlx5e_priv *priv);
	void (*cleanup_tx)(struct mlx5e_priv *priv);
	void (*enable)(struct mlx5e_priv *priv);
	void (*disable)(struct mlx5e_priv *priv);
	int (*update_rx)(struct mlx5e_priv *priv);
	void (*update_stats)(struct mlx5e_priv *priv);
	void (*update_carrier)(struct mlx5e_priv *priv);
	int (*max_nch_limit)(struct mlx5_core_dev *mdev);
	u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv,
			u8 lag_port, u8 tc);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int max_tc;
	u32 features; /* BIT(MLX5E_PROFILE_FEATURE_*) mask */
};
997 | |
u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
			   struct mlx5e_priv *priv,
			   const struct mlx5e_profile *profile,
			   u8 lag_port, u8 tc);

/* True when @profile advertises the given MLX5E_PROFILE_FEATURE_* bit. */
#define mlx5e_profile_feature_cap(profile, feature)	\
	((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
1005 | |
void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
					    enum mlx5e_mpwrq_umr_mode umr_mode);

void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

/* ethtool self-test plumbing. */
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

/* HW timestamp configuration get/set and RX CQE compression control. */
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);

/* VLAN filter add/kill (ndo-style signatures; @proto is unused). */
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
/* RQ lifecycle. */
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
		  struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
		  struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
/* XDP SQ lifecycle. */
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
1048 | |
/* Parameters for creating a completion queue (consumed by mlx5e_open_cq()). */
struct mlx5e_create_cq_param {
	struct net_device *netdev;
	struct workqueue_struct *wq;
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node;	/* presumably the NUMA node for allocations — TODO confirm */
	int ix;		/* presumably the channel index — TODO confirm */
};
1057 | |
struct mlx5e_cq_param;
/* CQ lifecycle. */
int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

/* "_locked" variants assume the caller already holds the required lock —
 * TODO confirm which lock (likely the netdev state lock) against callers.
 */
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
void mlx5e_trigger_napi_sched(struct napi_struct *napi);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
1073 | |
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
/* Generates fn##_ctx(), a wrapper matching the mlx5e_fp_preactivate
 * signature around a callback that takes only @priv (the context is
 * ignored).
 */
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ \
		return fn(priv); \
	}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *new_params,
			     mlx5e_fp_preactivate preactivate,
			     void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);

/* RQ/ICOSQ state transitions. */
int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
1099 | |
/* TXQ SQ lifecycle and helpers. */
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
		     struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u16 qos_queue_group_id,
			u32 *sqn);
/* TX error-CQE recovery work item handler. */
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
1120 | |
1121 | static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) |
1122 | { |
1123 | return MLX5_CAP_ETH(mdev, swp) && |
1124 | MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso); |
1125 | } |
1126 | |
extern const struct ethtool_ops mlx5e_ethtool_ops;

/* mdev-scoped resource helpers (memory key, shared mdev resources, TIRs). */
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
1135 | |
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
/* netdev open/close entry points. */
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);

/* MTU configuration. */
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
1158 | |
/* ethtool helpers: backing implementations for mlx5e_ethtool_ops,
 * taking the driver's priv instead of the net_device.
 */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param,
				 struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal,
			       struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
/* RSS hash configuration (these two take the net_device directly). */
int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
		   struct netlink_ext_ack *extack);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
1200 | |
1201 | /* mlx5e generic netdev management API */ |
1202 | static inline bool |
1203 | mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev) |
1204 | { |
1205 | return !is_kdump_kernel() && |
1206 | MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe); |
1207 | } |
1208 | |
1209 | int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev); |
1210 | int mlx5e_priv_init(struct mlx5e_priv *priv, |
1211 | const struct mlx5e_profile *profile, |
1212 | struct net_device *netdev, |
1213 | struct mlx5_core_dev *mdev); |
1214 | void mlx5e_priv_cleanup(struct mlx5e_priv *priv); |
1215 | struct net_device * |
1216 | mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile); |
1217 | int mlx5e_attach_netdev(struct mlx5e_priv *priv); |
1218 | void mlx5e_detach_netdev(struct mlx5e_priv *priv); |
1219 | void mlx5e_destroy_netdev(struct mlx5e_priv *priv); |
1220 | int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, |
1221 | const struct mlx5e_profile *new_profile, void *new_ppriv); |
1222 | void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); |
1223 | void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); |
1224 | void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); |
1225 | void mlx5e_rx_dim_work(struct work_struct *work); |
1226 | void mlx5e_tx_dim_work(struct work_struct *work); |
1227 | |
1228 | void mlx5e_set_xdp_feature(struct net_device *netdev); |
1229 | netdev_features_t mlx5e_features_check(struct sk_buff *skb, |
1230 | struct net_device *netdev, |
1231 | netdev_features_t features); |
1232 | int mlx5e_set_features(struct net_device *netdev, netdev_features_t features); |
1233 | #ifdef CONFIG_MLX5_ESWITCH |
1234 | int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac); |
1235 | int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate); |
1236 | int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); |
1237 | int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); |
1238 | #endif |
1239 | int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); |
1240 | #endif /* __MLX5_EN_H__ */ |
1241 | |