/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;
	u8 legacy_mode : 1;
	u8 use_raw_attrs : 1;
};
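
/*
 * Illustrative sketch, not part of the driver: the RDMA core hands verbs
 * objects back by their embedded ib_* struct, so the driver-private
 * wrapper is recovered with container_of(), e.g.:
 *
 *	struct irdma_ucontext *ucontext =
 *		container_of(ibucontext, struct irdma_ucontext, ibucontext);
 */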

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};
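
/*
 * Note (inferred from the fields above): AHs with identical attributes can
 * share one hardware AH. 'list' links the entry into the cached-AH hash
 * list, 'parent_ah' points at the shared cached entry, and 'refcnt' counts
 * its users.
 */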

struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};
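
/*
 * A page buffer list entry is referenced either by its index into the HMC
 * PBLE pool ('idx') or directly by a DMA address ('addr') when no PBLE
 * allocation is needed (see 'pbl_allocated' in struct irdma_pbl below).
 */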

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};
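
/*
 * Note (inferred from the registration lists in irdma_ucontext): an
 * irdma_pbl describes the pinned user memory backing a QP or a CQ; the
 * anonymous union is read as 'qp_mr' or 'cq_mr' depending on the owning
 * object, and 'on_list' records whether the entry is still linked on the
 * ucontext's qp/cq_reg_mem_list.
 */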

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	u8 is_hwreg;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};
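
/*
 * One irdma_mr backs both memory regions and memory windows; 'type'
 * records which verbs object the union holds. Illustrative sketch (not
 * part of this header):
 *
 *	struct irdma_mr *iwmr = container_of(ibmr, struct irdma_mr, ibmr);
 */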

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	refcount_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};
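
/*
 * Note (inferred from the fields above): 'resize_list' holds the old
 * irdma_cq_buf entries that remain live across a CQ resize until their
 * completions drain, and 'cmpl_generated' queues software-generated flush
 * completions (see irdma_generate_flush_completions() below).
 */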

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};
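
/*
 * Note (inferred from the field names): for kernel-mode QPs the driver
 * allocates the queue memory itself ('dma_mem') plus per-WQE tracking
 * arrays, so the wr_id of each posted SQ/RQ work request can be returned
 * in its completion.
 */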

struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};
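
/*
 * The two anonymous unions are selected by the QP's transport: iWARP QPs
 * use iwarp_info/tcp_info, RoCEv2 QPs use roce_info/udp_info. Illustrative
 * sketch (not part of this header):
 *
 *	struct irdma_qp *iwqp = container_of(ibqp, struct irdma_qp, ibqp);
 */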

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};
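
/*
 * Note (inferred from the flag names): 'mmap_flag' selects the page
 * protection used when 'bar_offset' is mapped into user space, i.e.
 * non-cached (pgprot_noncached()) for IRDMA_MMAP_IO_NC versus
 * write-combining (pgprot_writecombine()) for IRDMA_MMAP_IO_WC.
 */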

static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
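
/*
 * Both helpers decode the firmware version fields reported in the device's
 * FW_INFO feature word. Illustrative usage (hypothetical caller; 'iwdev'
 * and 'rf' are assumptions, not defined in this header):
 *
 *	ibdev_dbg(&iwdev->ibdev, "FW version %u.%u\n",
 *		  irdma_fw_major_ver(&rf->sc_dev),
 *		  irdma_fw_minor_ver(&rf->sc_dev));
 */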

/*
 * set_ib_wc_op_sq - map a send-queue completion's HW op type to the ib_wc
 * opcode expected by the RDMA core; an unrecognized op type is reported as
 * IB_WC_GENERAL_ERR.
 */
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

/*
 * set_ib_wc_op_rq - map a receive-queue completion to the ib_wc opcode
 * expected by the RDMA core.
 */
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support Send with Immediate, so on transports
	 * without that capability any completion carrying immediate data
	 * must be an RDMA Write with Immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM :
					IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}
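
/*
 * Illustrative dispatch from a poll-CQ path (hypothetical caller; the
 * queue-type field and capability test are assumptions, not defined in
 * this header):
 *
 *	if (cq_poll_info.q_type == IRDMA_CQE_QTYPE_SQ)
 *		set_ib_wc_op_sq(&cq_poll_info, &entry);
 *	else
 *		set_ib_wc_op_rq(&cq_poll_info, &entry,
 *				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
 */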

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */