1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ |
2 | /* Copyright (c) 2021, Microsoft Corporation. */ |
3 | |
4 | #ifndef _MANA_H |
5 | #define _MANA_H |
6 | |
7 | #include <net/xdp.h> |
8 | |
9 | #include "gdma.h" |
10 | #include "hw_channel.h" |
11 | |
12 | /* Microsoft Azure Network Adapter (MANA)'s definitions |
13 | * |
14 | * Structures labeled with "HW DATA" are exchanged with the hardware. All of |
15 | * them are naturally aligned and hence don't need __packed. |
16 | */ |
17 | |
18 | /* MANA protocol version */ |
19 | #define MANA_MAJOR_VERSION 0 |
20 | #define MANA_MINOR_VERSION 1 |
21 | #define MANA_MICRO_VERSION 1 |
22 | |
23 | typedef u64 mana_handle_t; |
24 | #define INVALID_MANA_HANDLE ((mana_handle_t)-1) |
25 | |
26 | enum TRI_STATE { |
27 | TRI_STATE_UNKNOWN = -1, |
28 | TRI_STATE_FALSE = 0, |
29 | TRI_STATE_TRUE = 1 |
30 | }; |
31 | |
32 | /* The number of entries in the hardware indirection table must be a power of 2 */ |
33 | #define MANA_INDIRECT_TABLE_SIZE 64 |
34 | #define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) |
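/* Example (illustrative sketch, not driver API): with a power-of-2 table
 * size, RSS can reduce the Toeplitz hash to an indirection-table slot with a
 * mask instead of a modulo, roughly:
 *
 *	u32 hash  = rx_oob->ppi[0].pkt_hash;
 *	u32 queue = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
 *
 * (rx_oob and apc stand for the mana_rxcomp_oob and mana_port_context
 * structures defined further below.)
 */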
35 | |
36 | /* The Toeplitz hash key's length in bytes: should be a multiple of 8 */ |
37 | #define MANA_HASH_KEY_SIZE 40 |
38 | |
39 | #define COMP_ENTRY_SIZE 64 |
40 | |
41 | #define RX_BUFFERS_PER_QUEUE 512 |
42 | |
43 | #define MAX_SEND_BUFFERS_PER_QUEUE 256 |
44 | |
45 | #define EQ_SIZE (8 * PAGE_SIZE) |
46 | #define LOG2_EQ_THROTTLE 3 |
47 | |
48 | #define MAX_PORTS_IN_MANA_DEV 256 |
49 | |
50 | /* Update this count whenever the respective structures are changed */ |
51 | #define MANA_STATS_RX_COUNT 5 |
52 | #define MANA_STATS_TX_COUNT 11 |
53 | |
54 | struct mana_stats_rx { |
55 | u64 packets; |
56 | u64 bytes; |
57 | u64 xdp_drop; |
58 | u64 xdp_tx; |
59 | u64 xdp_redirect; |
60 | struct u64_stats_sync syncp; |
61 | }; |
62 | |
63 | struct mana_stats_tx { |
64 | u64 packets; |
65 | u64 bytes; |
66 | u64 xdp_xmit; |
67 | u64 tso_packets; |
68 | u64 tso_bytes; |
69 | u64 tso_inner_packets; |
70 | u64 tso_inner_bytes; |
71 | u64 short_pkt_fmt; |
72 | u64 long_pkt_fmt; |
73 | u64 csum_partial; |
74 | u64 mana_map_err; |
75 | struct u64_stats_sync syncp; |
76 | }; |
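/* Example (illustrative sketch): both per-queue stats blocks above are meant
 * to be updated under their u64_stats_sync so the 64-bit counters read
 * consistently on 32-bit hosts. A TX completion path might do roughly:
 *
 *	struct mana_stats_tx *stats = &txq->stats;
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->packets++;
 *	stats->bytes += skb->len;
 *	u64_stats_update_end(&stats->syncp);
 *
 * Readers (e.g. ndo_get_stats64) pair u64_stats_fetch_begin() with
 * u64_stats_fetch_retry() in a retry loop.
 */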
77 | |
78 | struct mana_txq { |
79 | struct gdma_queue *gdma_sq; |
80 | |
81 | union { |
82 | u32 gdma_txq_id; |
83 | struct { |
84 | u32 reserved1 : 10; |
85 | u32 vsq_frame : 14; |
86 | u32 reserved2 : 8; |
87 | }; |
88 | }; |
89 | |
90 | u16 vp_offset; |
91 | |
92 | struct net_device *ndev; |
93 | |
94 | /* The SKBs are sent to the HW and we are waiting for the CQEs. */ |
95 | struct sk_buff_head pending_skbs; |
96 | struct netdev_queue *net_txq; |
97 | |
98 | atomic_t pending_sends; |
99 | |
100 | struct mana_stats_tx stats; |
101 | }; |
102 | |
103 | /* skb data and frags dma mappings */ |
104 | struct mana_skb_head { |
105 | /* GSO pkts may have 2 SGEs for the linear part */ |
106 | dma_addr_t dma_handle[MAX_SKB_FRAGS + 2]; |
107 | |
108 | u32 size[MAX_SKB_FRAGS + 2]; |
109 | }; |
110 | |
111 | #define MANA_HEADROOM sizeof(struct mana_skb_head) |
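/* Example (illustrative sketch): MANA_HEADROOM reserves space at the start of
 * a TX skb's buffer for the DMA bookkeeping above, so the send path can stash
 * the mappings and find them again when the completion arrives; roughly
 * (dev is a stand-in for the GDMA device's struct device):
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
 *					    DMA_TO_DEVICE);
 *	ash->size[0] = skb_headlen(skb);
 */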
112 | |
113 | enum mana_tx_pkt_format { |
114 | MANA_SHORT_PKT_FMT = 0, |
115 | MANA_LONG_PKT_FMT = 1, |
116 | }; |
117 | |
118 | struct mana_tx_short_oob { |
119 | u32 pkt_fmt : 2; |
120 | u32 is_outer_ipv4 : 1; |
121 | u32 is_outer_ipv6 : 1; |
122 | u32 comp_iphdr_csum : 1; |
123 | u32 comp_tcp_csum : 1; |
124 | u32 comp_udp_csum : 1; |
125 | u32 supress_txcqe_gen : 1; |
126 | u32 vcq_num : 24; |
127 | |
128 | u32 trans_off : 10; /* Transport header offset */ |
129 | u32 vsq_frame : 14; |
130 | u32 short_vp_offset : 8; |
131 | }; /* HW DATA */ |
132 | |
133 | struct mana_tx_long_oob { |
134 | u32 is_encap : 1; |
135 | u32 inner_is_ipv6 : 1; |
136 | u32 inner_tcp_opt : 1; |
137 | u32 inject_vlan_pri_tag : 1; |
138 | u32 reserved1 : 12; |
139 | u32 pcp : 3; /* 802.1Q */ |
140 | u32 dei : 1; /* 802.1Q */ |
141 | u32 vlan_id : 12; /* 802.1Q */ |
142 | |
143 | u32 inner_frame_offset : 10; |
144 | u32 inner_ip_rel_offset : 6; |
145 | u32 long_vp_offset : 12; |
146 | u32 reserved2 : 4; |
147 | |
148 | u32 reserved3; |
149 | u32 reserved4; |
150 | }; /* HW DATA */ |
151 | |
152 | struct mana_tx_oob { |
153 | struct mana_tx_short_oob s_oob; |
154 | struct mana_tx_long_oob l_oob; |
155 | }; /* HW DATA */ |
156 | |
157 | enum mana_cq_type { |
158 | MANA_CQ_TYPE_RX, |
159 | MANA_CQ_TYPE_TX, |
160 | }; |
161 | |
162 | enum mana_cqe_type { |
163 | CQE_INVALID = 0, |
164 | CQE_RX_OKAY = 1, |
165 | CQE_RX_COALESCED_4 = 2, |
166 | CQE_RX_OBJECT_FENCE = 3, |
167 | CQE_RX_TRUNCATED = 4, |
168 | |
169 | CQE_TX_OKAY = 32, |
170 | CQE_TX_SA_DROP = 33, |
171 | CQE_TX_MTU_DROP = 34, |
172 | CQE_TX_INVALID_OOB = 35, |
173 | CQE_TX_INVALID_ETH_TYPE = 36, |
174 | CQE_TX_HDR_PROCESSING_ERROR = 37, |
175 | CQE_TX_VF_DISABLED = 38, |
176 | CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, |
177 | CQE_TX_VPORT_DISABLED = 40, |
178 | CQE_TX_VLAN_TAGGING_VIOLATION = 41, |
179 | }; |
180 | |
181 | #define MANA_CQE_COMPLETION 1 |
182 | |
183 | struct mana_cqe_header { |
184 | u32 cqe_type : 6; |
185 | u32 client_type : 2; |
186 | u32 vendor_err : 24; |
187 | }; /* HW DATA */ |
188 | |
189 | /* NDIS HASH Types */ |
190 | #define NDIS_HASH_IPV4 BIT(0) |
191 | #define NDIS_HASH_TCP_IPV4 BIT(1) |
192 | #define NDIS_HASH_UDP_IPV4 BIT(2) |
193 | #define NDIS_HASH_IPV6 BIT(3) |
194 | #define NDIS_HASH_TCP_IPV6 BIT(4) |
195 | #define NDIS_HASH_UDP_IPV6 BIT(5) |
196 | #define NDIS_HASH_IPV6_EX BIT(6) |
197 | #define NDIS_HASH_TCP_IPV6_EX BIT(7) |
198 | #define NDIS_HASH_UDP_IPV6_EX BIT(8) |
199 | |
200 | #define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) |
201 | #define MANA_HASH_L4 \ |
202 | (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ |
203 | NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) |
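/* Example (illustrative sketch): rx_hashtype in the RX completion OOB is a
 * mask of the NDIS_HASH_* bits, so a receive path can pick the skb hash type
 * from the L4/L3 groupings above, roughly (rxhash_type and pkt_hash are
 * stand-ins for the rx_hashtype and ppi[].pkt_hash fields defined below):
 *
 *	if (rxhash_type & MANA_HASH_L4)
 *		skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L4);
 *	else if (rxhash_type & MANA_HASH_L3)
 *		skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L3);
 *	else
 *		skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_NONE);
 */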
204 | |
205 | struct mana_rxcomp_perpkt_info { |
206 | u32 pkt_len : 16; |
207 | u32 reserved1 : 16; |
208 | u32 reserved2; |
209 | u32 pkt_hash; |
210 | }; /* HW DATA */ |
211 | |
212 | #define MANA_RXCOMP_OOB_NUM_PPI 4 |
213 | |
214 | /* Receive completion OOB */ |
215 | struct mana_rxcomp_oob { |
216 | struct mana_cqe_header cqe_hdr; |
217 | |
218 | u32 rx_vlan_id : 12; |
219 | u32 rx_vlantag_present : 1; |
220 | u32 rx_outer_iphdr_csum_succeed : 1; |
221 | u32 rx_outer_iphdr_csum_fail : 1; |
222 | u32 reserved1 : 1; |
223 | u32 rx_hashtype : 9; |
224 | u32 rx_iphdr_csum_succeed : 1; |
225 | u32 rx_iphdr_csum_fail : 1; |
226 | u32 rx_tcp_csum_succeed : 1; |
227 | u32 rx_tcp_csum_fail : 1; |
228 | u32 rx_udp_csum_succeed : 1; |
229 | u32 rx_udp_csum_fail : 1; |
230 | u32 reserved2 : 1; |
231 | |
232 | struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; |
233 | |
234 | u32 rx_wqe_offset; |
235 | }; /* HW DATA */ |
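/* Example (illustrative sketch): the checksum result bits in mana_rxcomp_oob
 * drive skb->ip_summed on receive, e.g. when the hardware validated the L4
 * checksum:
 *
 *	if (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */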
236 | |
237 | struct mana_tx_comp_oob { |
238 | struct mana_cqe_header cqe_hdr; |
239 | |
240 | u32 tx_data_offset; |
241 | |
242 | u32 tx_sgl_offset : 5; |
243 | u32 tx_wqe_offset : 27; |
244 | |
245 | u32 reserved[12]; |
246 | }; /* HW DATA */ |
247 | |
248 | struct mana_rxq; |
249 | |
250 | #define CQE_POLLING_BUFFER 512 |
251 | |
252 | struct mana_cq { |
253 | struct gdma_queue *gdma_cq; |
254 | |
255 | /* Cache the CQ id (used to verify if each CQE comes to the right CQ). */ |
256 | u32 gdma_id; |
257 | |
258 | /* Type of the CQ: TX or RX */ |
259 | enum mana_cq_type type; |
260 | |
261 | /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. |
262 | * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX. |
263 | */ |
264 | struct mana_rxq *rxq; |
265 | |
266 | /* Pointer to the mana_txq that is pushing TX CQEs to the queue. |
267 | * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX. |
268 | */ |
269 | struct mana_txq *txq; |
270 | |
271 | /* Buffer into which the CQ handler can copy the CQEs. */ |
272 | struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; |
273 | |
274 | /* NAPI data */ |
275 | struct napi_struct napi; |
276 | int work_done; |
277 | int budget; |
278 | }; |
279 | |
280 | struct mana_recv_buf_oob { |
281 | /* A valid GDMA work request representing the data buffer. */ |
282 | struct gdma_wqe_request wqe_req; |
283 | |
284 | void *buf_va; |
285 | bool from_pool; /* allocated from a page pool */ |
286 | |
287 | /* SGL of the buffer to be sent as part of the work request. */ |
288 | u32 num_sge; |
289 | struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES]; |
290 | |
291 | /* Required to store the result of mana_gd_post_work_request. |
292 | * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the |
293 | * work queue when the WQE is consumed. |
294 | */ |
295 | struct gdma_posted_wqe_info wqe_inf; |
296 | }; |
297 | |
298 | #define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \ |
299 | + ETH_HLEN) |
300 | |
301 | #define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM) |
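/* Example (worked arithmetic, assuming 4 KiB pages on x86-64): each RX buffer
 * is a single page, and XDP needs XDP_PACKET_HEADROOM (256 bytes) in front of
 * the frame plus room for skb_shared_info behind it, so
 *
 *	MANA_XDP_MTU_MAX = PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM
 *	                 = 4096 - (SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *	                           + ETH_HLEN) - 256
 *
 * which works out to roughly 3.5 KB on such a configuration; XDP is only
 * usable when the MTU is at or below this bound.
 */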
302 | |
303 | struct mana_rxq { |
304 | struct gdma_queue *gdma_rq; |
305 | /* Cache the gdma receive queue id */ |
306 | u32 gdma_id; |
307 | |
308 | /* Index of RQ in the vPort, not gdma receive queue id */ |
309 | u32 rxq_idx; |
310 | |
311 | u32 datasize; |
312 | u32 alloc_size; |
313 | u32 headroom; |
314 | |
315 | mana_handle_t rxobj; |
316 | |
317 | struct mana_cq rx_cq; |
318 | |
319 | struct completion fence_event; |
320 | |
321 | struct net_device *ndev; |
322 | |
323 | /* Total number of receive buffers to be allocated */ |
324 | u32 num_rx_buf; |
325 | |
326 | u32 buf_index; |
327 | |
328 | struct mana_stats_rx stats; |
329 | |
330 | struct bpf_prog __rcu *bpf_prog; |
331 | struct xdp_rxq_info xdp_rxq; |
332 | void *xdp_save_va; /* for reusing */ |
333 | bool xdp_flush; |
334 | int xdp_rc; /* XDP redirect return code */ |
335 | |
336 | struct page_pool *page_pool; |
337 | |
338 | /* MUST BE THE LAST MEMBER: |
339 | * Each receive buffer has an associated mana_recv_buf_oob. |
340 | */ |
341 | struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf); |
342 | }; |
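/* Example (illustrative sketch): because rx_oobs[] is a flexible array sized
 * by num_rx_buf, an RX queue and its per-buffer OOBs can be allocated in one
 * shot, e.g.:
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
 *		      GFP_KERNEL);
 *	if (rxq)
 *		rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
 */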
343 | |
344 | struct mana_tx_qp { |
345 | struct mana_txq txq; |
346 | |
347 | struct mana_cq tx_cq; |
348 | |
349 | mana_handle_t tx_object; |
350 | }; |
351 | |
352 | struct mana_ethtool_stats { |
353 | u64 stop_queue; |
354 | u64 wake_queue; |
355 | u64 hc_rx_discards_no_wqe; |
356 | u64 hc_rx_err_vport_disabled; |
357 | u64 hc_rx_bytes; |
358 | u64 hc_rx_ucast_pkts; |
359 | u64 hc_rx_ucast_bytes; |
360 | u64 hc_rx_bcast_pkts; |
361 | u64 hc_rx_bcast_bytes; |
362 | u64 hc_rx_mcast_pkts; |
363 | u64 hc_rx_mcast_bytes; |
364 | u64 hc_tx_err_gf_disabled; |
365 | u64 hc_tx_err_vport_disabled; |
366 | u64 hc_tx_err_inval_vportoffset_pkt; |
367 | u64 hc_tx_err_vlan_enforcement; |
368 | u64 hc_tx_err_eth_type_enforcement; |
369 | u64 hc_tx_err_sa_enforcement; |
370 | u64 hc_tx_err_sqpdid_enforcement; |
371 | u64 hc_tx_err_cqpdid_enforcement; |
372 | u64 hc_tx_err_mtu_violation; |
373 | u64 hc_tx_err_inval_oob; |
374 | u64 hc_tx_bytes; |
375 | u64 hc_tx_ucast_pkts; |
376 | u64 hc_tx_ucast_bytes; |
377 | u64 hc_tx_bcast_pkts; |
378 | u64 hc_tx_bcast_bytes; |
379 | u64 hc_tx_mcast_pkts; |
380 | u64 hc_tx_mcast_bytes; |
381 | u64 hc_tx_err_gdma; |
382 | u64 tx_cqe_err; |
383 | u64 tx_cqe_unknown_type; |
384 | u64 rx_coalesced_err; |
385 | u64 rx_cqe_unknown_type; |
386 | }; |
387 | |
388 | struct mana_context { |
389 | struct gdma_dev *gdma_dev; |
390 | |
391 | u16 num_ports; |
392 | |
393 | struct mana_eq *eqs; |
394 | |
395 | struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; |
396 | }; |
397 | |
398 | struct mana_port_context { |
399 | struct mana_context *ac; |
400 | struct net_device *ndev; |
401 | |
402 | u8 mac_addr[ETH_ALEN]; |
403 | |
404 | enum TRI_STATE rss_state; |
405 | |
406 | mana_handle_t default_rxobj; |
407 | bool tx_shortform_allowed; |
408 | u16 tx_vp_offset; |
409 | |
410 | struct mana_tx_qp *tx_qp; |
411 | |
412 | /* Indirection Table for RX & TX. The values are queue indexes */ |
413 | u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; |
414 | |
415 | /* Indirection table containing RxObject Handles */ |
416 | mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; |
417 | |
418 | /* Hash key used by the NIC */ |
419 | u8 hashkey[MANA_HASH_KEY_SIZE]; |
420 | |
421 | /* This points to an array of num_queues RQ pointers. */ |
422 | struct mana_rxq **rxqs; |
423 | |
424 | /* pre-allocated rx buffer array */ |
425 | void **rxbufs_pre; |
426 | dma_addr_t *das_pre; |
427 | int rxbpre_total; |
428 | u32 rxbpre_datasize; |
429 | u32 rxbpre_alloc_size; |
430 | u32 rxbpre_headroom; |
431 | |
432 | struct bpf_prog *bpf_prog; |
433 | |
434 | /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */ |
435 | unsigned int max_queues; |
436 | unsigned int num_queues; |
437 | |
438 | mana_handle_t port_handle; |
439 | mana_handle_t pf_filter_handle; |
440 | |
441 | /* Mutex for sharing access to vport_use_count */ |
442 | struct mutex vport_mutex; |
443 | int vport_use_count; |
444 | |
445 | u16 port_idx; |
446 | |
447 | bool port_is_up; |
448 | bool port_st_save; /* Saved port state */ |
449 | |
450 | struct mana_ethtool_stats eth_stats; |
451 | }; |
452 | |
453 | netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); |
454 | int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, |
455 | bool update_hash, bool update_tab); |
456 | |
457 | int mana_alloc_queues(struct net_device *ndev); |
458 | int mana_attach(struct net_device *ndev); |
459 | int mana_detach(struct net_device *ndev, bool from_close); |
460 | |
461 | int mana_probe(struct gdma_dev *gd, bool resuming); |
462 | void mana_remove(struct gdma_dev *gd, bool suspending); |
463 | |
464 | void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev); |
465 | int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, |
466 | u32 flags); |
467 | u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, |
468 | struct xdp_buff *xdp, void *buf_va, uint pkt_len); |
469 | struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); |
470 | void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); |
471 | int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); |
472 | void mana_query_gf_stats(struct mana_port_context *apc); |
473 | |
474 | extern const struct ethtool_ops mana_ethtool_ops; |
475 | |
476 | /* A CQ can be created without being associated with any EQ */ |
477 | #define GDMA_CQ_NO_EQ 0xffff |
478 | |
479 | struct mana_obj_spec { |
480 | u32 queue_index; |
481 | u64 gdma_region; |
482 | u32 queue_size; |
483 | u32 attached_eq; |
484 | u32 modr_ctx_id; |
485 | }; |
486 | |
487 | enum mana_command_code { |
488 | MANA_QUERY_DEV_CONFIG = 0x20001, |
489 | MANA_QUERY_GF_STAT = 0x20002, |
490 | MANA_CONFIG_VPORT_TX = 0x20003, |
491 | MANA_CREATE_WQ_OBJ = 0x20004, |
492 | MANA_DESTROY_WQ_OBJ = 0x20005, |
493 | MANA_FENCE_RQ = 0x20006, |
494 | MANA_CONFIG_VPORT_RX = 0x20007, |
495 | MANA_QUERY_VPORT_CONFIG = 0x20008, |
496 | |
497 | /* Privileged commands for the PF mode */ |
498 | MANA_REGISTER_FILTER = 0x28000, |
499 | MANA_DEREGISTER_FILTER = 0x28001, |
500 | MANA_REGISTER_HW_PORT = 0x28003, |
501 | MANA_DEREGISTER_HW_PORT = 0x28004, |
502 | }; |
503 | |
504 | /* Query Device Configuration */ |
505 | struct mana_query_device_cfg_req { |
506 | struct gdma_req_hdr hdr; |
507 | |
508 | /* MANA NIC Driver Capability flags */ |
509 | u64 mn_drv_cap_flags1; |
510 | u64 mn_drv_cap_flags2; |
511 | u64 mn_drv_cap_flags3; |
512 | u64 mn_drv_cap_flags4; |
513 | |
514 | u32 proto_major_ver; |
515 | u32 proto_minor_ver; |
516 | u32 proto_micro_ver; |
517 | |
518 | u32 reserved; |
519 | }; /* HW DATA */ |
520 | |
521 | struct mana_query_device_cfg_resp { |
522 | struct gdma_resp_hdr hdr; |
523 | |
524 | u64 pf_cap_flags1; |
525 | u64 pf_cap_flags2; |
526 | u64 pf_cap_flags3; |
527 | u64 pf_cap_flags4; |
528 | |
529 | u16 max_num_vports; |
530 | u16 reserved; |
531 | u32 max_num_eqs; |
532 | |
533 | /* response v2: */ |
534 | u16 adapter_mtu; |
535 | u16 reserved2; |
536 | u32 reserved3; |
537 | }; /* HW DATA */ |
538 | |
539 | /* Query vPort Configuration */ |
540 | struct mana_query_vport_cfg_req { |
541 | struct gdma_req_hdr hdr; |
542 | u32 vport_index; |
543 | }; /* HW DATA */ |
544 | |
545 | struct mana_query_vport_cfg_resp { |
546 | struct gdma_resp_hdr hdr; |
547 | u32 max_num_sq; |
548 | u32 max_num_rq; |
549 | u32 num_indirection_ent; |
550 | u32 reserved1; |
551 | u8 mac_addr[6]; |
552 | u8 reserved2[2]; |
553 | mana_handle_t vport; |
554 | }; /* HW DATA */ |
555 | |
556 | /* Configure vPort */ |
557 | struct mana_config_vport_req { |
558 | struct gdma_req_hdr hdr; |
559 | mana_handle_t vport; |
560 | u32 pdid; |
561 | u32 doorbell_pageid; |
562 | }; /* HW DATA */ |
563 | |
564 | struct mana_config_vport_resp { |
565 | struct gdma_resp_hdr hdr; |
566 | u16 tx_vport_offset; |
567 | u8 short_form_allowed; |
568 | u8 reserved; |
569 | }; /* HW DATA */ |
570 | |
571 | /* Create WQ Object */ |
572 | struct mana_create_wqobj_req { |
573 | struct gdma_req_hdr hdr; |
574 | mana_handle_t vport; |
575 | u32 wq_type; |
576 | u32 reserved; |
577 | u64 wq_gdma_region; |
578 | u64 cq_gdma_region; |
579 | u32 wq_size; |
580 | u32 cq_size; |
581 | u32 cq_moderation_ctx_id; |
582 | u32 cq_parent_qid; |
583 | }; /* HW DATA */ |
584 | |
585 | struct mana_create_wqobj_resp { |
586 | struct gdma_resp_hdr hdr; |
587 | u32 wq_id; |
588 | u32 cq_id; |
589 | mana_handle_t wq_obj; |
590 | }; /* HW DATA */ |
591 | |
592 | /* Destroy WQ Object */ |
593 | struct mana_destroy_wqobj_req { |
594 | struct gdma_req_hdr hdr; |
595 | u32 wq_type; |
596 | u32 reserved; |
597 | mana_handle_t wq_obj_handle; |
598 | }; /* HW DATA */ |
599 | |
600 | struct mana_destroy_wqobj_resp { |
601 | struct gdma_resp_hdr hdr; |
602 | }; /* HW DATA */ |
603 | |
604 | /* Fence RQ */ |
605 | struct mana_fence_rq_req { |
606 | struct gdma_req_hdr hdr; |
607 | mana_handle_t wq_obj_handle; |
608 | }; /* HW DATA */ |
609 | |
610 | struct mana_fence_rq_resp { |
611 | struct gdma_resp_hdr hdr; |
612 | }; /* HW DATA */ |
613 | |
614 | /* Query GF stats */ |
615 | struct mana_query_gf_stat_req { |
616 | struct gdma_req_hdr hdr; |
617 | u64 req_stats; |
618 | }; /* HW DATA */ |
619 | |
620 | struct mana_query_gf_stat_resp { |
621 | struct gdma_resp_hdr hdr; |
622 | u64 reported_stats; |
623 | /* rx errors/discards */ |
624 | u64 rx_discards_nowqe; |
625 | u64 rx_err_vport_disabled; |
626 | /* rx bytes/packets */ |
627 | u64 hc_rx_bytes; |
628 | u64 hc_rx_ucast_pkts; |
629 | u64 hc_rx_ucast_bytes; |
630 | u64 hc_rx_bcast_pkts; |
631 | u64 hc_rx_bcast_bytes; |
632 | u64 hc_rx_mcast_pkts; |
633 | u64 hc_rx_mcast_bytes; |
634 | /* tx errors */ |
635 | u64 tx_err_gf_disabled; |
636 | u64 tx_err_vport_disabled; |
637 | u64 tx_err_inval_vport_offset_pkt; |
638 | u64 tx_err_vlan_enforcement; |
639 | u64 tx_err_ethtype_enforcement; |
640 | u64 tx_err_SA_enforcement; |
641 | u64 tx_err_SQPDID_enforcement; |
642 | u64 tx_err_CQPDID_enforcement; |
643 | u64 tx_err_mtu_violation; |
644 | u64 tx_err_inval_oob; |
645 | /* tx bytes/packets */ |
646 | u64 hc_tx_bytes; |
647 | u64 hc_tx_ucast_pkts; |
648 | u64 hc_tx_ucast_bytes; |
649 | u64 hc_tx_bcast_pkts; |
650 | u64 hc_tx_bcast_bytes; |
651 | u64 hc_tx_mcast_pkts; |
652 | u64 hc_tx_mcast_bytes; |
653 | /* tx error */ |
654 | u64 tx_err_gdma; |
655 | }; /* HW DATA */ |
656 | |
657 | /* Configure vPort Rx Steering */ |
658 | struct mana_cfg_rx_steer_req_v2 { |
659 | struct gdma_req_hdr hdr; |
660 | mana_handle_t vport; |
661 | u16 num_indir_entries; |
662 | u16 indir_tab_offset; |
663 | u32 rx_enable; |
664 | u32 rss_enable; |
665 | u8 update_default_rxobj; |
666 | u8 update_hashkey; |
667 | u8 update_indir_tab; |
668 | u8 reserved; |
669 | mana_handle_t default_rxobj; |
670 | u8 hashkey[MANA_HASH_KEY_SIZE]; |
671 | u8 cqe_coalescing_enable; |
672 | u8 reserved2[7]; |
673 | }; /* HW DATA */ |
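/* Example (illustrative sketch): the indirection table of RxObject handles is
 * not embedded in the structure above; the request buffer can be sized to
 * hold the table right after the header, with indir_tab_offset pointing at
 * it, roughly:
 *
 *	req_buf_size = sizeof(*req) +
 *		       sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
 *	req = kzalloc(req_buf_size, GFP_KERNEL);
 *	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
 *	req->indir_tab_offset = sizeof(*req);
 *	memcpy(req + 1, apc->rxobj_table, sizeof(apc->rxobj_table));
 */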
674 | |
675 | struct mana_cfg_rx_steer_resp { |
676 | struct gdma_resp_hdr hdr; |
677 | }; /* HW DATA */ |
678 | |
679 | /* Register HW vPort */ |
680 | struct mana_register_hw_vport_req { |
681 | struct gdma_req_hdr hdr; |
682 | u16 attached_gfid; |
683 | u8 is_pf_default_vport; |
684 | u8 reserved1; |
685 | u8 allow_all_ether_types; |
686 | u8 reserved2; |
687 | u8 reserved3; |
688 | u8 reserved4; |
689 | }; /* HW DATA */ |
690 | |
691 | struct mana_register_hw_vport_resp { |
692 | struct gdma_resp_hdr hdr; |
693 | mana_handle_t hw_vport_handle; |
694 | }; /* HW DATA */ |
695 | |
696 | /* Deregister HW vPort */ |
697 | struct mana_deregister_hw_vport_req { |
698 | struct gdma_req_hdr hdr; |
699 | mana_handle_t hw_vport_handle; |
700 | }; /* HW DATA */ |
701 | |
702 | struct mana_deregister_hw_vport_resp { |
703 | struct gdma_resp_hdr hdr; |
704 | }; /* HW DATA */ |
705 | |
706 | /* Register filter */ |
707 | struct mana_register_filter_req { |
708 | struct gdma_req_hdr hdr; |
709 | mana_handle_t vport; |
710 | u8 mac_addr[6]; |
711 | u8 reserved1; |
712 | u8 reserved2; |
713 | u8 reserved3; |
714 | u8 reserved4; |
715 | u16 reserved5; |
716 | u32 reserved6; |
717 | u32 reserved7; |
718 | u32 reserved8; |
719 | }; /* HW DATA */ |
720 | |
721 | struct mana_register_filter_resp { |
722 | struct gdma_resp_hdr hdr; |
723 | mana_handle_t filter_handle; |
724 | }; /* HW DATA */ |
725 | |
726 | /* Deregister filter */ |
727 | struct mana_deregister_filter_req { |
728 | struct gdma_req_hdr hdr; |
729 | mana_handle_t filter_handle; |
730 | }; /* HW DATA */ |
731 | |
732 | struct mana_deregister_filter_resp { |
733 | struct gdma_resp_hdr hdr; |
734 | }; /* HW DATA */ |
735 | |
736 | /* Requested GF stats Flags */ |
737 | /* Rx discards/Errors */ |
738 | #define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001 |
739 | #define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002 |
740 | /* Rx bytes/pkts */ |
741 | #define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004 |
742 | #define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008 |
743 | #define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010 |
744 | #define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020 |
745 | #define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040 |
746 | #define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080 |
747 | #define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100 |
748 | /* Tx errors */ |
749 | #define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200 |
750 | #define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400 |
751 | #define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \ |
752 | 0x0000000000000800 |
753 | #define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000 |
754 | #define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \ |
755 | 0x0000000000002000 |
756 | #define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000 |
757 | #define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000 |
758 | #define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000 |
759 | #define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000 |
760 | #define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000 |
761 | /* Tx bytes/pkts */ |
762 | #define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000 |
763 | #define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000 |
764 | #define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000 |
765 | #define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000 |
766 | #define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000 |
767 | #define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000 |
768 | #define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000 |
769 | /* Tx error */ |
770 | #define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000 |
771 | |
772 | #define MANA_MAX_NUM_QUEUES 64 |
773 | |
774 | #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) |
775 | |
776 | struct mana_tx_package { |
777 | struct gdma_wqe_request wqe_req; |
778 | struct gdma_sge sgl_array[5]; |
779 | struct gdma_sge *sgl_ptr; |
780 | |
781 | struct mana_tx_oob tx_oob; |
782 | |
783 | struct gdma_posted_wqe_info wqe_info; |
784 | }; |
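/* Example (illustrative sketch): the short OOB can only encode an 8-bit vPort
 * offset, so a TX path falls back to the long packet format when the offset
 * does not fit; roughly (pkg is a stand-in for a struct mana_tx_package):
 *
 *	if (apc->tx_shortform_allowed &&
 *	    apc->tx_vp_offset <= MANA_SHORT_VPORT_OFFSET_MAX)
 *		pkg->tx_oob.s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
 *	else
 *		pkg->tx_oob.s_oob.pkt_fmt = MANA_LONG_PKT_FMT;
 */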
785 | |
786 | int mana_create_wq_obj(struct mana_port_context *apc, |
787 | mana_handle_t vport, |
788 | u32 wq_type, struct mana_obj_spec *wq_spec, |
789 | struct mana_obj_spec *cq_spec, |
790 | mana_handle_t *wq_obj); |
791 | |
792 | void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, |
793 | mana_handle_t wq_obj); |
794 | |
795 | int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, |
796 | u32 doorbell_pg_id); |
797 | void mana_uncfg_vport(struct mana_port_context *apc); |
798 | #endif /* _MANA_H */ |
799 | |