1 | /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #ifndef _QED_L2_H |
8 | #define _QED_L2_H |
9 | #include <linux/types.h> |
10 | #include <linux/io.h> |
11 | #include <linux/kernel.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/qed/qed_eth_if.h> |
14 | #include "qed.h" |
15 | #include "qed_hw.h" |
16 | #include "qed_sp.h" |
17 | struct { |
18 | u8 ; |
19 | u8 ; |
20 | u8 ; |
21 | u8 ; |
22 | u8 ; |
23 | u8 ; |
24 | u8 ; |
25 | u8 ; |
26 | |
27 | /* Indirection table consist of rx queue handles */ |
28 | void *[QED_RSS_IND_TABLE_SIZE]; |
29 | u32 [QED_RSS_KEY_SIZE]; |
30 | }; |
31 | |
/* TPA (aggregation) and SGE tuning parameters for a vport.
 * The update_* flags select which of the following attribute groups
 * the ramrod should modify.
 */
struct qed_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;		/* update the tpa_*_en_flg fields below */
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;	/* update the aggregation parameters below */
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};
50 | |
/* Operations supported by the unicast/multicast filter ramrods. */
enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};
58 | |
/* Classification key type of a unicast filter - which combination of
 * MAC / VLAN / VNI (and inner vs. outer header) the filter matches on.
 */
enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};
70 | |
/* A single unicast filter command. Which of @mac, @vlan and @vni are
 * meaningful depends on @type; @vport_to_remove_from is relevant for
 * MOVE/REPLACE style opcodes.
 */
struct qed_filter_ucast {
	enum qed_filter_opcode opcode;
	enum qed_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};
83 | |
/* A multicast filter command carrying up to QED_MAX_MC_ADDRS addresses. */
struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;	/* number of valid entries in @mac */
#define QED_MAX_MC_ADDRS 64
	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
93 | |
/**
 * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
 *
 * @p_hwfn: HW device data.
 * @p_rxq: Handler of queue to close.
 * @eq_completion_only: If true, completion will always be on EQe;
 *                      if false, completion will be on EQe if p_hwfn
 *                      opaque differs from the RXQ opaque, otherwise
 *                      on CQe.
 * @cqe_completion: If true, completion will be received on CQe.
 *
 * Return: Int.
 */
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
		      void *p_rxq,
		      bool eq_completion_only, bool cqe_completion);
112 | |
/**
 * qed_eth_tx_queue_stop(): Closes a Tx queue.
 *
 * @p_hwfn: HW device data.
 * @p_txq: Handle to the Tx queue to be closed.
 *
 * Return: Int.
 */
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
122 | |
/* TPA aggregation mode of a vport; set via the vport-start ramrod. */
enum qed_tpa_mode {
	QED_TPA_MODE_NONE,
	QED_TPA_MODE_UNUSED,
	QED_TPA_MODE_GRO,
	QED_TPA_MODE_MAX
};
129 | |
/* Parameters for the vport-start ramrod (qed_sp_eth_vport_start()). */
struct qed_sp_vport_start_params {
	enum qed_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool check_mac;
	bool check_ethtype;
};
145 | |
/* Starts a vport with the given configuration; counterpart of
 * qed_sp_vport_stop().
 */
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);
148 | |
/* Rx/Tx accept-mode configuration. The *_accept_filter fields are
 * bitmasks of the QED_ACCEPT_* flags below; the update_* fields select
 * which direction's configuration the ramrod should modify.
 */
struct qed_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define QED_ACCEPT_NONE 0x01
#define QED_ACCEPT_UCAST_MATCHED 0x02
#define QED_ACCEPT_UCAST_UNMATCHED 0x04
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
#define QED_ACCEPT_ANY_VNI 0x40
};
162 | |
/* aRFS configuration: which protocol/IP-version combinations to steer
 * and the filtering mode to use (see qed_arfs_mode_configure()).
 */
struct qed_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	enum qed_filter_config_mode mode;
};
170 | |
171 | struct qed_sp_vport_update_params { |
172 | u16 opaque_fid; |
173 | u8 vport_id; |
174 | u8 update_vport_active_rx_flg; |
175 | u8 vport_active_rx_flg; |
176 | u8 update_vport_active_tx_flg; |
177 | u8 vport_active_tx_flg; |
178 | u8 update_inner_vlan_removal_flg; |
179 | u8 inner_vlan_removal_flg; |
180 | u8 silent_vlan_removal_flg; |
181 | u8 update_default_vlan_enable_flg; |
182 | u8 default_vlan_enable_flg; |
183 | u8 update_default_vlan_flg; |
184 | u16 default_vlan; |
185 | u8 update_tx_switching_flg; |
186 | u8 tx_switching_flg; |
187 | u8 update_approx_mcast_flg; |
188 | u8 update_anti_spoofing_en_flg; |
189 | u8 anti_spoofing_en; |
190 | u8 update_accept_any_vlan_flg; |
191 | u8 accept_any_vlan; |
192 | u32 bins[8]; |
193 | struct qed_rss_params *; |
194 | struct qed_filter_accept_flags accept_flags; |
195 | struct qed_sge_tpa_params *sge_tpa_params; |
196 | u8 update_ctl_frame_check; |
197 | u8 mac_chk_en; |
198 | u8 ethtype_chk_en; |
199 | }; |
200 | |
/* Issues the vport-update ramrod described by @p_params. */
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data);
205 | |
/**
 * qed_sp_vport_stop(): This ramrod closes a VPort after all its
 *                      RX and TX queues are terminated.
 *                      An Assert is generated if any queues are left open.
 *
 * @p_hwfn: HW device data.
 * @opaque_fid: Opaque FID.
 * @vport_id: VPort ID.
 *
 * Return: Int.
 */
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
218 | |
/* Configures the unicast filter described by @p_filter_cmd. */
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);
224 | |
/**
 * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
 *                                It is used for setting the active state
 *                                of the queue and updating the TPA and
 *                                SGE parameters.
 * @p_hwfn: HW device data.
 * @pp_rxq_handlers: An array of queue handlers to be updated.
 * @num_rxqs: Number of queues to update.
 * @complete_cqe_flg: Post completion to the CQE Ring if set.
 * @complete_event_flg: Post completion to the Event Ring if set.
 * @comp_mode: Comp mode.
 * @p_comp_data: Pointer Comp data.
 *
 * Return: Int.
 *
 * Note: At the moment - only used by non-linux VFs.
 */

int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
			    void **pp_rxq_handlers,
			    u8 num_rxqs,
			    u8 complete_cqe_flg,
			    u8 complete_event_flg,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);
251 | |
/**
 * qed_get_vport_stats(): Fills provided statistics
 *                        struct with statistics.
 *
 * @cdev: Qed dev pointer.
 * @stats: Points to struct that will be filled with statistics.
 *
 * Return: Void.
 */
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
262 | |
/**
 * qed_get_vport_stats_context(): Fills provided statistics
 *                                struct with statistics.
 *
 * @cdev: Qed dev pointer.
 * @stats: Points to struct that will be filled with statistics.
 * @is_atomic: Hint from the caller - if the func can sleep or not.
 *
 * Context: The function should not sleep in case is_atomic == true.
 * Return: Void.
 */
void qed_get_vport_stats_context(struct qed_dev *cdev,
				 struct qed_eth_stats *stats,
				 bool is_atomic);

/* Resets the device's vport statistics. */
void qed_reset_vport_stats(struct qed_dev *cdev);
279 | |
/**
 * qed_arfs_mode_configure(): Enable or disable rfs mode.
 *                            It must accept at least one of tcp or udp true
 *                            and at least one of ipv4 or ipv6 true to enable
 *                            rfs mode.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_cfg_params: arfs mode configuration parameters.
 *
 * Return: Void.
 */
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct qed_arfs_config_params *p_cfg_params);
295 | |
/**
 * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
 *                                    or remove arfs hw filter.
 *
 * @p_hwfn: HW device data.
 * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize
 *        it with cookie and callback function address, if not
 *        using this mode then client must pass NULL.
 * @p_params: Pointer to params.
 *
 * Return: Int.
 */
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
				struct qed_spq_comp_cb *p_cb,
				struct qed_ntuple_filter_params *p_params);
312 | |
/* Maximum number of queues that can share a single queue-zone. */
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
/* Sentinel stats-id meaning "use the queue's own CID". */
#define QED_QUEUE_CID_SELF (0xff)

/* Almost identical to the qed_queue_start_common_params,
 * but here we maintain the SB index in IGU CAM.
 */
struct qed_queue_cid_params {
	u8 vport_id;
	u16 queue_id;
	u8 stats_id;
};
324 | |
/* Additional parameters required for initialization of the queue_cid
 * and are relevant only for a PF initializing one for its VFs.
 */
struct qed_queue_cid_vf_params {
	/* Should match the VF's relative index */
	u8 vfid;

	/* 0-based queue index. Should reflect the relative qzone the
	 * VF thinks is associated with it [in its range].
	 */
	u8 vf_qid;

	/* Indicates a VF is legacy, making it differ in several things:
	 *  - Producers would be placed in a different place.
	 *  - Makes assumptions regarding the CIDs.
	 */
	u8 vf_legacy;

	/* Index differentiating multiple queues opened on the same qzone. */
	u8 qid_usage_idx;
};
345 | |
/* Per-queue connection context identifier and the metadata needed to
 * start/stop/update that queue.
 */
struct qed_queue_cid {
	/* For stats-id, the `rel' is actually absolute as well */
	struct qed_queue_cid_params rel;
	struct qed_queue_cid_params abs;

	/* These have no 'relative' meaning */
	u16 sb_igu_id;
	u8 sb_idx;

	u32 cid;
	u16 opaque_fid;

	/* True for an Rx queue, false for a Tx queue */
	bool b_is_rx;

	/* VFs queues are mapped differently, so we need to know the
	 * relative queue associated with them [0-based].
	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
	 * and not on the VF itself.
	 */
	u8 vfid;
	u8 vf_qid;

	/* We need an additional index to differentiate between queues opened
	 * for same queue-zone, as VFs would have to communicate the info
	 * to the PF [otherwise PF has no way to differentiate].
	 */
	u8 qid_usage_idx;

	/* Bitmask of the QED_QCID_LEGACY_VF_* flags below */
	u8 vf_legacy;
#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0))
#define QED_QCID_LEGACY_VF_CID (BIT(1))

	struct qed_hwfn *p_owner;
};
380 | |
381 | int qed_l2_alloc(struct qed_hwfn *p_hwfn); |
382 | void qed_l2_setup(struct qed_hwfn *p_hwfn); |
383 | void qed_l2_free(struct qed_hwfn *p_hwfn); |
384 | |
385 | void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, |
386 | struct qed_queue_cid *p_cid); |
387 | |
388 | struct qed_queue_cid * |
389 | qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, |
390 | u16 opaque_fid, |
391 | struct qed_queue_start_common_params *p_params, |
392 | bool b_is_rx, |
393 | struct qed_queue_cid_vf_params *p_vf_params); |
394 | |
395 | int |
396 | qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, |
397 | struct qed_sp_vport_start_params *p_params); |
398 | |
/**
 * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
 *                             already prepared.
 *
 * @p_hwfn: HW device data.
 * @p_cid: Pointer CID.
 * @bd_max_bytes: Max bytes.
 * @bd_chain_phys_addr: Chain physical address.
 * @cqe_pbl_addr: PBL address.
 * @cqe_pbl_size: PBL size.
 *
 * Return: Int.
 */
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
418 | |
/**
 * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
 *                             already prepared.
 *
 * @p_hwfn: HW device data.
 * @p_cid: Pointer CID.
 * @pbl_addr: PBL address.
 * @pbl_size: PBL size.
 * @pq_id: Parameters for choosing the PQ for this Tx queue.
 *
 * Return: Int.
 */
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
435 | |
/* Computes the approximate-multicast bin index for @mac. */
u8 qed_mcast_bin_from_mac(u8 *mac);

/* Set the Rx/Tx interrupt coalescing value for the queue of @p_cid. */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid);

int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid);

/* Read back the Rx/Tx coalescing value of the queue of @p_cid into
 * @p_hw_coal.
 */
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_hw_coal);

int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_hw_coal);
453 | |
454 | #endif |
455 | |