1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2/* Copyright (c) 2015 - 2020 Intel Corporation */
3#ifndef IRDMA_PUDA_H
4#define IRDMA_PUDA_H
5
6#define IRDMA_IEQ_MPA_FRAMING 6
7#define IRDMA_TCP_OFFSET 40
8#define IRDMA_IPV4_PAD 20
9#define IRDMA_MRK_BLK_SZ 512
10
/* PUDA resource kinds: inbound listen queue (ILQ) or iWARP exception queue (IEQ) */
enum puda_rsrc_type {
	IRDMA_PUDA_RSRC_TYPE_ILQ = 1,
	IRDMA_PUDA_RSRC_TYPE_IEQ = 2,
	IRDMA_PUDA_RSRC_TYPE_MAX = 3, /* Must be last entry */
};
16
/* Progress milestones reached while bringing up / operating a PUDA resource */
enum puda_rsrc_complete {
	PUDA_CQ_CREATED = 1,
	PUDA_QP_CREATED = 2,
	PUDA_TX_COMPLETE = 3,
	PUDA_RX_COMPLETE = 4,
	PUDA_HASH_CRC_COMPLETE = 5,
};
24
25struct irdma_sc_dev;
26struct irdma_sc_qp;
27struct irdma_sc_cq;
28
/* Information extracted from a PUDA completion (filled by irdma_puda_poll_cmpl) */
struct irdma_puda_cmpl_info {
	struct irdma_qp_uk *qp;	/* user-level QP the completion belongs to */
	u8 q_type;		/* queue type the CQE completed on (SQ vs RQ) */
	u8 l3proto;		/* L3 protocol indicator from the completion */
	u8 l4proto;		/* L4 protocol indicator from the completion */
	u16 vlan;		/* VLAN tag; valid only when vlan_valid is set */
	u32 payload_len;	/* length of the received payload in bytes */
	u32 compl_error;	/* No_err=0, else major and minor err code */
	u32 qp_id;		/* QP number reported by the completion */
	u32 wqe_idx;		/* index of the completed WQE */
	bool ipv4:1;		/* packet is IPv4 (else IPv6) */
	bool smac_valid:1;	/* smac[] below holds a valid source MAC */
	bool vlan_valid:1;	/* vlan above holds a valid VLAN tag */
	u8 smac[ETH_ALEN];	/* source MAC; valid only when smac_valid is set */
};
44
/* Parameters describing one transmit request passed to irdma_puda_send() */
struct irdma_puda_send_info {
	u64 paddr; /* Physical address */
	u32 len;	/* total length of the frame to send */
	u32 ah_id;	/* address handle id used for the send */
	u8 tcplen;	/* TCP header length in bytes */
	u8 maclen;	/* MAC header length in bytes */
	bool ipv4:1;	/* frame is IPv4 (else IPv6) */
	bool do_lpb:1;	/* send as loopback */
	void *scratch;	/* caller cookie returned on xmit completion */
};
55
/*
 * A single PUDA packet buffer, carrying both the DMA-able data area and
 * parsed header/metadata for the frame it currently holds.
 */
struct irdma_puda_buf {
	struct list_head list; /* MUST be first entry */
	struct irdma_dma_mem mem; /* DMA memory for the buffer */
	struct irdma_puda_buf *next; /* for alloclist in rsrc struct */
	struct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */
	void *scratch;		/* caller cookie for completion handling */
	u8 *iph;		/* pointer to the IP header within the buffer */
	u8 *tcph;		/* pointer to the TCP header within the buffer */
	u8 *data;		/* pointer to the payload past the headers */
	u16 datalen;		/* payload length in bytes */
	u16 vlan_id;		/* VLAN id; valid only when vlan_valid is set */
	u8 tcphlen; /* tcp length in bytes */
	u8 maclen; /* mac length in bytes */
	u32 totallen; /* maclen+iphlen+tcphlen+datalen */
	refcount_t refcount;	/* buffer reference count */
	u8 hdrlen;		/* combined header length in bytes */
	bool ipv4:1;		/* frame is IPv4 (else IPv6) */
	bool vlan_valid:1;	/* vlan_id above is valid */
	bool do_lpb:1; /* Loopback buffer */
	bool smac_valid:1;	/* smac[] below is valid */
	u32 seqnum;		/* TCP sequence number of this segment */
	u32 ah_id;		/* address handle id associated with the buf */
	u8 smac[ETH_ALEN];	/* source MAC; valid only when smac_valid is set */
	struct irdma_sc_vsi *vsi;	/* owning VSI */
};
81
/* Creation parameters for a PUDA resource, consumed by irdma_puda_create_rsrc() */
struct irdma_puda_rsrc_info {
	void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);	/* rx callback */
	void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);		/* tx-done callback */
	enum puda_rsrc_type type; /* ILQ or IEQ */
	u32 count;
	u32 pd_id;	/* protection domain id for the resource's QP */
	u32 cq_id;	/* CQ number to create/use */
	u32 qp_id;	/* QP number to create/use */
	u32 sq_size;	/* number of SQ WQEs */
	u32 rq_size;	/* number of RQ WQEs */
	u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
	u16 buf_size;	/* size of each packet buffer in bytes */
	u8 stats_idx;	/* stats instance index; valid only when stats_idx_valid */
	bool stats_idx_valid:1;
	int abi_ver;	/* user/kernel ABI version for QP setup */
};
98
/*
 * Runtime state of one PUDA resource (an ILQ or IEQ): its CQ/QP pair,
 * buffer pools, work-request tracking, callbacks and statistics.
 */
struct irdma_puda_rsrc {
	struct irdma_sc_cq cq;		/* completion queue for this resource */
	struct irdma_sc_qp qp;		/* queue pair for this resource */
	struct irdma_sc_pd sc_pd;	/* protection domain backing the QP */
	struct irdma_sc_dev *dev;	/* owning device */
	struct irdma_sc_vsi *vsi;	/* owning VSI */
	struct irdma_dma_mem cqmem;	/* DMA memory for the CQ */
	struct irdma_dma_mem qpmem;	/* DMA memory for the QP */
	struct irdma_virt_mem ilq_mem;	/* backing allocation for this struct/bufs */
	enum puda_rsrc_complete cmpl;	/* how far creation progressed (for teardown) */
	enum puda_rsrc_type type;	/* ILQ or IEQ */
	u16 buf_size; /* buf must be max datalen + tcpip hdr + mac */
	u32 cq_id;			/* CQ number */
	u32 qp_id;			/* QP number */
	u32 sq_size;			/* number of SQ WQEs */
	u32 rq_size;			/* number of RQ WQEs */
	u32 cq_size;			/* number of CQEs */
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;	/* per-SQ-WQE tracking info */
	u64 *rq_wrid_array;		/* per-RQ-WQE wrid (buffer pointer) array */
	u32 compl_rxwqe_idx;		/* RQ WQE index of the last completed rx */
	u32 rx_wqe_idx;			/* next RQ WQE index to post */
	u32 rxq_invalid_cnt;		/* count of RQ entries awaiting repost */
	u32 tx_wqe_avail_cnt;		/* SQ WQEs currently free for xmit */
	struct shash_desc *hash_desc;	/* crypto shash state for MPA CRC checks */
	struct list_head txpend;	/* sends queued while no SQ WQE was free */
	struct list_head bufpool; /* free buffers pool list for recv and xmit */
	u32 alloc_buf_count;		/* total buffers allocated for this rsrc */
	u32 avail_buf_count; /* snapshot of currently available buffers */
	spinlock_t bufpool_lock;	/* protects bufpool */
	struct irdma_puda_buf *alloclist;	/* chain of all allocated bufs (for free) */
	void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);	/* rx callback */
	void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);		/* tx-done callback */
	/* puda stats */
	u64 stats_buf_alloc_fail;
	u64 stats_pkt_rcvd;
	u64 stats_pkt_sent;
	u64 stats_rcvd_pkt_err;
	u64 stats_sent_pkt_q;
	u64 stats_bad_qp_id;
	/* IEQ stats */
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 pmode_count;
	u64 partials_handled;
	u8 stats_idx;		/* stats instance index; valid only when stats_idx_valid */
	bool check_crc:1;	/* verify MPA CRC on received FPDUs */
	bool stats_idx_valid:1;
};
148
/* Buffer pool management and packet transmit/receive paths */
struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
			    struct irdma_puda_buf *buf);
void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
			 struct irdma_puda_buf *buf);
int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
/* Resource (ILQ/IEQ) lifetime and completion polling */
int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
			   struct irdma_puda_rsrc_info *info);
void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
			  bool reset);
int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
			 u32 *compl_err);

/* IEQ helpers: QP lookup, header parsing, and MPA CRC handling */
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
				     struct irdma_puda_buf *buf);
int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
			      struct irdma_puda_buf *buf);
int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val);
int irdma_init_hash_desc(struct shash_desc **desc);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_free_hash_desc(struct shash_desc *desc);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
/* CQP commands to create/destroy the PUDA QP and CQ */
int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
/* Address handle management for IEQ transmits */
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
				struct irdma_ah_info *ah_info);
int irdma_puda_create_ah(struct irdma_sc_dev *dev,
			 struct irdma_ah_info *ah_info, bool wait,
			 enum puda_rsrc_type type, void *cb_param,
			 struct irdma_sc_ah **ah);
void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
/* FPDU (partial MPA frame) processing and per-QP IEQ cleanup */
void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
			     struct irdma_puda_rsrc *ieq);
void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);
#endif /* IRDMA_PUDA_H */
186

/* drivers/infiniband/hw/irdma/puda.h */