// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"

static struct irdma_rsrc_limits rsrc_limits_table[] = {
	[0] = {
		.qplimit = SZ_128,
	},
	[1] = {
		.qplimit = SZ_1K,
	},
	[2] = {
		.qplimit = SZ_2K,
	},
	[3] = {
		.qplimit = SZ_4K,
	},
	[4] = {
		.qplimit = SZ_16K,
	},
	[5] = {
		.qplimit = SZ_64K,
	},
	[6] = {
		.qplimit = SZ_128K,
	},
	[7] = {
		.qplimit = SZ_256K,
	},
};

/* types of hmc objects */
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
	IRDMA_HMC_IW_QP,
	IRDMA_HMC_IW_CQ,
	IRDMA_HMC_IW_HTE,
	IRDMA_HMC_IW_ARP,
	IRDMA_HMC_IW_APBVT_ENTRY,
	IRDMA_HMC_IW_MR,
	IRDMA_HMC_IW_XF,
	IRDMA_HMC_IW_XFFL,
	IRDMA_HMC_IW_Q1,
	IRDMA_HMC_IW_Q1FL,
	IRDMA_HMC_IW_PBLE,
	IRDMA_HMC_IW_TIMER,
	IRDMA_HMC_IW_FSIMC,
	IRDMA_HMC_IW_FSIAV,
	IRDMA_HMC_IW_RRF,
	IRDMA_HMC_IW_RRFFL,
	IRDMA_HMC_IW_HDR,
	IRDMA_HMC_IW_MD,
	IRDMA_HMC_IW_OOISC,
	IRDMA_HMC_IW_OOISCFFL,
};

/**
 * irdma_iwarp_ce_handler - handle iwarp completions
 * @iwcq: iwarp cq receiving event
 */
static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
{
	struct irdma_cq *cq = iwcq->back_cq;

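	/* a completion event un-arms a kernel CQ; clear the armed flag so
	 * the CQ can be re-armed for the next event
	 */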
	if (!cq->user_mode)
		atomic_set(&cq->armed, 0);
	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

/**
 * irdma_puda_ce_handler - handle puda completion events
 * @rf: RDMA PCI function
 * @cq: puda completion q for event
 */
static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
				  struct irdma_sc_cq *cq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 compl_error;
	int status;

	do {
		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
		if (status == -ENOENT)
			break;
		if (status) {
			ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
				  compl_error);
			break;
		}
	} while (1);

	irdma_sc_ccq_arm(cq);
}

/**
 * irdma_process_ceq - handle ceq for completions
 * @rf: RDMA PCI function
 * @ceq: ceq having cq for completion
 */
static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_sc_ceq *sc_ceq;
	struct irdma_sc_cq *cq;
	unsigned long flags;

	sc_ceq = &ceq->sc_ceq;
	do {
		spin_lock_irqsave(&ceq->ce_lock, flags);
		cq = irdma_sc_process_ceq(dev, sc_ceq);
		if (!cq) {
			spin_unlock_irqrestore(&ceq->ce_lock, flags);
			break;
		}

		if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
			irdma_iwarp_ce_handler(cq);

		spin_unlock_irqrestore(&ceq->ce_lock, flags);

		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
		else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
			 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
			irdma_puda_ce_handler(rf, cq);
	} while (1);
}

static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
				   struct irdma_aeqe_info *info)
{
	qp->sq_flush_code = info->sq;
	qp->rq_flush_code = info->rq;
	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;

	switch (info->ae_id) {
	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
	case IRDMA_AE_AMP_INVALID_STAG:
	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
	case IRDMA_AE_AMP_UNALLOCATED_STAG:
	case IRDMA_AE_AMP_BAD_PD:
	case IRDMA_AE_AMP_BAD_QP:
	case IRDMA_AE_AMP_BAD_STAG_KEY:
	case IRDMA_AE_AMP_BAD_STAG_INDEX:
	case IRDMA_AE_AMP_TO_WRAP:
	case IRDMA_AE_PRIV_OPERATION_DENIED:
		qp->flush_code = FLUSH_PROT_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_UDA_XMIT_BAD_PD:
	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
	case IRDMA_AE_UDA_L4LEN_INVALID:
	case IRDMA_AE_DDP_UBE_INVALID_MO:
	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->flush_code = FLUSH_LOC_LEN_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
		qp->flush_code = FLUSH_REM_ACCESS_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
	case IRDMA_AE_IB_REMOTE_OP_ERROR:
		qp->flush_code = FLUSH_REM_OP_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_LCE_QP_CATASTROPHIC:
		qp->flush_code = FLUSH_FATAL_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
		qp->flush_code = FLUSH_GENERAL_ERR;
		break;
	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		qp->flush_code = FLUSH_RETRY_EXC_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
	case IRDMA_AE_AMP_MWBIND_VALID_STAG:
		qp->flush_code = FLUSH_MW_BIND_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_IB_INVALID_REQUEST:
		qp->flush_code = FLUSH_REM_INV_REQ_ERR;
		qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
		break;
	default:
		qp->flush_code = FLUSH_GENERAL_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	}
}

/**
 * irdma_process_aeq - handle aeq events
 * @rf: RDMA PCI function
 */
static void irdma_process_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct irdma_aeqe_info aeinfo;
	struct irdma_aeqe_info *info = &aeinfo;
	int ret;
	struct irdma_qp *iwqp = NULL;
	struct irdma_cq *iwcq = NULL;
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp_host_ctx_info *ctx_info = NULL;
	struct irdma_device *iwdev = rf->iwdev;
	unsigned long flags;

	u32 aeqcnt = 0;

	if (!sc_aeq->size)
		return;

	do {
		memset(info, 0, sizeof(*info));
		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		ibdev_dbg(&iwdev->ibdev,
			  "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
			  info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
			  info->iwarp_state, info->ae_src);

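		/* when the AE refers to a QP, look it up under the table
		 * lock and take a reference so it cannot be freed while
		 * the event is being processed
		 */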
		if (info->qp) {
			spin_lock_irqsave(&rf->qptable_lock, flags);
			iwqp = rf->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&rf->qptable_lock,
						       flags);
				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
					wake_up(&iwdev->suspend_wq);
					continue;
				}
				ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
					  info->qp_cq_id);
				continue;
			}
			irdma_qp_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&rf->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
				iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
		} else {
			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
		struct irdma_cm_node *cm_node;
		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
			cm_node = iwqp->cm_node;
			if (cm_node->accept_pend) {
				atomic_dec(&cm_node->listener->pend_accepts_cnt);
				cm_node->accept_pend = 0;
			}
			iwqp->rts_ae_rcvd = 1;
			wake_up_interruptible(&iwqp->waitq);
			break;
		case IRDMA_AE_LLP_FIN_RECEIVED:
		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
			if (qp->term_flags)
				break;
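			/* start the close timer only on the first FIN/close
			 * event; close_timer_started guards against rearming
			 */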
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
				if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
				    iwqp->ibqp_state == IB_QPS_RTS) {
					irdma_next_iw_state(iwqp,
							    IRDMA_QP_STATE_CLOSING,
							    0, 0, 0);
					irdma_cm_disconn(iwqp);
				}
				irdma_schedule_cm_timer(iwqp->cm_node,
							(struct irdma_puda_buf *)iwqp,
							IRDMA_TIMER_TYPE_CLOSE,
							1, 0);
			}
			break;
		case IRDMA_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				irdma_terminate_done(qp, 0);
			else
				irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_BAD_CLOSE:
		case IRDMA_AE_RESET_SENT:
			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
					    0);
			irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_QP_SUSPEND_COMPLETE:
			if (iwqp->iwdev->vsi.tc_change_pending) {
				if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
					wake_up(&iwqp->iwdev->suspend_wq);
			}
			if (iwqp->suspend_pending) {
				iwqp->suspend_pending = false;
				wake_up(&iwqp->iwdev->suspend_wq);
			}
			break;
		case IRDMA_AE_TERMINATE_SENT:
			irdma_terminate_send_fin(qp);
			break;
		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
			irdma_terminate_received(qp, info);
			break;
		case IRDMA_AE_CQ_OPERATION_ERROR:
			ibdev_err(&iwdev->ibdev,
				  "Processing an iWARP related AE for CQ misc = 0x%04X\n",
				  info->ae_id);

			spin_lock_irqsave(&rf->cqtable_lock, flags);
			iwcq = rf->cq_table[info->qp_cq_id];
			if (!iwcq) {
				spin_unlock_irqrestore(&rf->cqtable_lock,
						       flags);
				ibdev_dbg(to_ibdev(dev),
					  "cq_id %d is already freed\n", info->qp_cq_id);
				continue;
			}
			irdma_cq_add_ref(&iwcq->ibcq);
			spin_unlock_irqrestore(&rf->cqtable_lock, flags);

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent,
							 iwcq->ibcq.cq_context);
			}
			irdma_cq_rem_ref(&iwcq->ibcq);
			break;
		case IRDMA_AE_RESET_NOT_SENT:
		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
		case IRDMA_AE_RESOURCE_EXHAUSTION:
			break;
		case IRDMA_AE_PRIV_OPERATION_DENIED:
		case IRDMA_AE_STAG_ZERO_INVALID:
		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
		case IRDMA_AE_DDP_UBE_INVALID_MO:
		case IRDMA_AE_DDP_UBE_INVALID_QN:
		case IRDMA_AE_DDP_NO_L_BIT:
		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case IRDMA_AE_INVALID_ARP_ENTRY:
		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
		case IRDMA_AE_STALE_ARP_ENTRY:
		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
		case IRDMA_AE_LLP_SYN_RECEIVED:
		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		case IRDMA_AE_LCE_QP_CATASTROPHIC:
		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
		case IRDMA_AE_LLP_TOO_MANY_RNRS:
		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
		default:
			ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
				  info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
			if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
				ctx_info->roce_info->err_rq_idx_valid = info->rq;
				if (info->rq) {
					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
								ctx_info);
				}
				irdma_set_flush_fields(qp, info);
				irdma_cm_disconn(iwqp);
				break;
			}
			ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
			if (info->rq) {
				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = true;
				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
						   ctx_info);
			}
			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
				irdma_cm_disconn(iwqp);
			} else {
				irdma_terminate_connection(qp, info);
			}
			break;
		}
		if (info->qp)
			irdma_qp_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		irdma_sc_repost_aeq_entries(dev, aeqcnt);
}

/**
 * irdma_ena_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
{
	dev->irq_ops->irdma_en_irq(dev, msix_id);
}

/**
 * irdma_dpc - tasklet for aeq and ceq 0
 * @t: tasklet_struct ptr
 */
static void irdma_dpc(struct tasklet_struct *t)
{
	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);

	if (rf->msix_shared)
		irdma_process_ceq(rf, rf->ceqlist);
	irdma_process_aeq(rf);
	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
}

/**
 * irdma_ceq_dpc - dpc handler for CEQ
 * @t: tasklet_struct ptr
 */
static void irdma_ceq_dpc(struct tasklet_struct *t)
{
	struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
	struct irdma_pci_f *rf = iwceq->rf;

	irdma_process_ceq(rf, iwceq);
	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
}

/**
 * irdma_save_msix_info - copy msix vector information to iwarp device
 * @rf: RDMA PCI function
 *
 * Allocate iwdev msix table and copy the msix info to the table
 * Return 0 if successful, otherwise return error
 */
static int irdma_save_msix_info(struct irdma_pci_f *rf)
{
	struct irdma_qvlist_info *iw_qvlist;
	struct irdma_qv_info *iw_qvinfo;
	struct msix_entry *pmsix;
	u32 ceq_idx;
	u32 i;
	size_t size;

	if (!rf->msix_count)
		return -EINVAL;

	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
	if (!rf->iw_msixtbl)
		return -ENOMEM;

	rf->iw_qvlist = (struct irdma_qvlist_info *)
			(&rf->iw_msixtbl[rf->msix_count]);
	iw_qvlist = rf->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = rf->msix_count;
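	/* if there are no more vectors than online CPUs, share the first
	 * vector between the AEQ and CEQ 0; otherwise cap the vector count
	 * at one per CPU plus a dedicated AEQ vector
	 */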
	if (rf->msix_count <= num_online_cpus())
		rf->msix_shared = true;
	else if (rf->msix_count > num_online_cpus() + 1)
		rf->msix_count = num_online_cpus() + 1;

	pmsix = rf->msix_entries;
	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
		rf->iw_msixtbl[i].idx = pmsix->entry;
		rf->iw_msixtbl[i].irq = pmsix->vector;
		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (!i) {
			iw_qvinfo->aeq_idx = 0;
			if (rf->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
		pmsix++;
	}

	return 0;
}

/**
 * irdma_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: RDMA PCI function
 */
static irqreturn_t irdma_irq_handler(int irq, void *data)
{
	struct irdma_pci_f *rf = data;

	tasklet_schedule(&rf->dpc_tasklet);

	return IRQ_HANDLED;
}

/**
 * irdma_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t irdma_ceq_handler(int irq, void *data)
{
	struct irdma_ceq *iwceq = data;

	if (iwceq->irq != irq)
		ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
			  iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);

	return IRQ_HANDLED;
}

/**
 * irdma_destroy_irq - destroy device interrupts
 * @rf: RDMA PCI function
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void irdma_destroy_irq(struct irdma_pci_f *rf,
			      struct irdma_msix_vector *msix_vec, void *dev_id)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;

	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
	irq_update_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
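	/* dev_id is rf for the shared AEQ/CEQ 0 vector, the iwceq otherwise */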
	if (rf == dev_id) {
		tasklet_kill(&rf->dpc_tasklet);
	} else {
		struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;

		tasklet_kill(&iwceq->dpc_tasklet);
	}
}

/**
 * irdma_destroy_cqp - destroy control qp
 * @rf: RDMA PCI function
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void irdma_destroy_cqp(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp *cqp = &rf->cqp;
	int status = 0;

	status = irdma_sc_cqp_destroy(dev->cqp);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);

	irdma_cleanup_pending_cqp_op(rf);
	dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
			  cqp->sq.pa);
	cqp->sq.va = NULL;
	kfree(cqp->scratch_array);
	cqp->scratch_array = NULL;
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
{
	struct irdma_aeq *aeq = &rf->aeq;
	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
	dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;

	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
	vfree(aeq->mem.va);
}

/**
 * irdma_destroy_aeq - destroy aeq
 * @rf: RDMA PCI function
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void irdma_destroy_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	int status = -EBUSY;

	if (!rf->msix_shared) {
		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
	}
	if (rf->reset)
		goto exit;

	aeq->sc_aeq.size = 0;
	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);

exit:
	if (aeq->virtual_map) {
		irdma_destroy_virt_aeq(rf);
	} else {
		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
				  aeq->mem.pa);
		aeq->mem.va = NULL;
	}
}

/**
 * irdma_destroy_ceq - destroy ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	if (rf->reset)
		goto exit;

	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
		goto exit;
	}

	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
			  status);
exit:
	dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
			  iwceq->mem.pa);
	iwceq->mem.va = NULL;
}

/**
 * irdma_del_ceq_0 - destroy ceq 0
 * @rf: RDMA PCI function
 *
 * Disable the ceq 0 interrupt and destroy the ceq 0
 */
static void irdma_del_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = rf->ceqlist;
	struct irdma_msix_vector *msix_vec;

	if (rf->msix_shared) {
		msix_vec = &rf->iw_msixtbl[0];
		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
						  msix_vec->ceq_id,
						  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, rf);
	} else {
		msix_vec = &rf->iw_msixtbl[1];
		irdma_destroy_irq(rf, msix_vec, iwceq);
	}

	irdma_destroy_ceq(rf, iwceq);
	rf->sc_dev.ceq_valid = false;
	rf->ceqs_count = 0;
}

/**
 * irdma_del_ceqs - destroy all ceq's except CEQ 0
 * @rf: RDMA PCI function
 *
 * Go through all of the device ceq's, except 0, and for each
 * ceq disable the ceq interrupt and destroy the ceq
 */
static void irdma_del_ceqs(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = &rf->ceqlist[1];
	struct irdma_msix_vector *msix_vec;
	u32 i = 0;

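	/* CEQ 1 uses vector 1 when the AEQ shares vector 0 with CEQ 0;
	 * otherwise vector 0 is AEQ-only, vector 1 is CEQ 0, and the
	 * remaining CEQs start at vector 2
	 */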
	if (rf->msix_shared)
		msix_vec = &rf->iw_msixtbl[1];
	else
		msix_vec = &rf->iw_msixtbl[2];

	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
						  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, iwceq);
		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
				  IRDMA_OP_CEQ_DESTROY);
		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
		iwceq->mem.va = NULL;
	}
	rf->ceqs_count = 1;
}

/**
 * irdma_destroy_ccq - destroy control cq
 * @rf: RDMA PCI function
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void irdma_destroy_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq *ccq = &rf->ccq;
	int status = 0;

	if (rf->cqp_cmpl_wq)
		destroy_workqueue(rf->cqp_cmpl_wq);

	if (!rf->reset)
		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
			  ccq->mem_cq.pa);
	ccq->mem_cq.va = NULL;
}

/**
 * irdma_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: iwarp device
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info struct
 * @privileged: permission to close HMC objects
 * @reset: true if called before reset
 */
static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
					 enum irdma_hmc_rsrc_type obj_type,
					 struct irdma_hmc_info *hmc_info,
					 bool privileged, bool reset)
{
	struct irdma_hmc_del_obj_info info = {};

	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.privileged = privileged;
	if (irdma_sc_del_hmc_obj(dev, &info, reset))
		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
			  obj_type);
}

/**
 * irdma_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @privileged: permission to delete HMC objects
 * @reset: true if called before reset
 * @vers: hardware version
 */
static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
				  struct irdma_hmc_info *hmc_info, bool privileged,
				  bool reset, enum irdma_vers vers)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     hmc_info, privileged, reset);
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}
}

/**
 * irdma_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
				     struct irdma_hmc_create_obj_info *info)
{
	return irdma_sc_create_hmc_obj(dev, info);
}

/**
 * irdma_create_hmc_objs - create all hmc objects for the device
 * @rf: RDMA PCI function
 * @privileged: permission to create HMC objects
 * @vers: HW version
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
				 enum irdma_vers vers)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_hmc_create_obj_info info = {};
	int i, status = 0;

	info.hmc_info = dev->hmc_info;
	info.privileged = privileged;
	info.entry_type = rf->sd_type;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
			continue;
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
			info.rsrc_type = iw_hmc_obj_types[i];
			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
			info.add_sd_cnt = 0;
			status = irdma_create_hmc_obj_type(dev, &info);
			if (status) {
				ibdev_dbg(to_ibdev(dev),
					  "ERR: create obj type %d status = %d\n",
					  iw_hmc_obj_types[i], status);
				break;
			}
		}
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}

	if (!status)
		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
							   true, true);

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     dev->hmc_info, privileged,
						     false);
	}

	return status;
}

/**
 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
 * @rf: RDMA PCI function
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
				 struct irdma_dma_mem *memptr, u32 size,
				 u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)rf->obj_next.va;
	newva = va;
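	/* mask is (alignment - 1): round up to the next alignment boundary */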
	if (mask)
		newva = ALIGN(va, (unsigned long)mask + 1ULL);
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = rf->obj_next.pa + extra;
	memptr->size = size;
	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
		return -ENOMEM;

	rf->obj_next.va = (u8 *)memptr->va + size;
	rf->obj_next.pa = memptr->pa + size;

	return 0;
}

/**
 * irdma_create_cqp - create control qp
 * @rf: RDMA PCI function
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_cqp(struct irdma_pci_f *rf)
{
	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
	struct irdma_dma_mem mem;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp_init_info cqp_init_info = {};
	struct irdma_cqp *cqp = &rf->cqp;
	u16 maj_err, min_err;
	int i, status;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return -ENOMEM;

	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		status = -ENOMEM;
		goto err_scratch;
	}

	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
			     IRDMA_CQP_ALIGNMENT);
	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
					&cqp->sq.pa, GFP_KERNEL);
	if (!cqp->sq.va) {
		status = -ENOMEM;
		goto err_sq;
	}

	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
				       IRDMA_HOST_CTX_ALIGNMENT_M);
	if (status)
		goto err_ctx;

	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = rf->rsrc_profile;
	cqp_init_info.scratch_array = cqp->scratch_array;
	cqp_init_info.protocol_used = rf->protocol_used;

	switch (rf->rdma_ver) {
	case IRDMA_GEN_1:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
		break;
	case IRDMA_GEN_2:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
		break;
	}
	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
		goto err_ctx;
	}

	spin_lock_init(&cqp->req_lock);
	spin_lock_init(&cqp->compl_lock);

	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		ibdev_dbg(to_ibdev(dev),
			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
			  status, maj_err, min_err);
		goto err_ctx;
	}

	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);

	/* init the waitqueue of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	init_waitqueue_head(&cqp->remove_wq);
	return 0;

err_ctx:
	dma_free_coherent(dev->hw->device, cqp->sq.size,
			  cqp->sq.va, cqp->sq.pa);
	cqp->sq.va = NULL;
err_sq:
	kfree(cqp->scratch_array);
	cqp->scratch_array = NULL;
err_scratch:
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;

	return status;
}

/**
 * irdma_create_ccq - create control cq
 * @rf: RDMA PCI function
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq_init_info info = {};
	struct irdma_ccq *ccq = &rf->ccq;
	int status;

	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
	ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
				 IRDMA_CQ0_ALIGNMENT);
	ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
					    &ccq->mem_cq.pa, GFP_KERNEL);
	if (!ccq->mem_cq.va)
		return -ENOMEM;

	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
				       ccq->shadow_area.size,
				       IRDMA_SHADOWAREA_M);
	if (status)
		goto exit;

	ccq->sc_cq.back_cq = ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = ccq->shadow_area.va;
	info.shadow_area_pa = ccq->shadow_area.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	info.vsi = &rf->default_vsi;
	status = irdma_sc_ccq_init(dev->ccq, &info);
	if (!status)
		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
exit:
	if (status) {
		dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
				  ccq->mem_cq.va, ccq->mem_cq.pa);
		ccq->mem_cq.va = NULL;
	}

	return status;
}

/**
 * irdma_alloc_set_mac - set up a mac address table entry
 * @iwdev: irdma device
 *
 * Allocate a mac ip entry and add it to the hw table. Return 0
 * if successful, otherwise return error
 */
static int irdma_alloc_set_mac(struct irdma_device *iwdev)
{
	int status;

	status = irdma_alloc_local_mac_entry(iwdev->rf,
					     &iwdev->mac_ip_table_idx);
	if (!status) {
		status = irdma_add_local_mac_entry(iwdev->rf,
						   (const u8 *)iwdev->netdev->dev_addr,
						   (u8)iwdev->mac_ip_table_idx);
		if (status)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
 * ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
				u32 ceq_id, struct irdma_msix_vector *msix_vec)
{
	int status;

	if (rf->msix_shared && !ceq_id) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				     msix_vec->name, rf);
	} else {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-CEQ-%d",
			 dev_name(&rf->pcidev->dev), ceq_id);
		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);

		status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
				     msix_vec->name, iwceq);
	}
	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
	if (status) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
		return status;
	}

	msix_vec->ceq_id = ceq_id;
	rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);

	return 0;
}

/**
 * irdma_cfg_aeq_vector - set up the msix vector for aeq
 * @rf: RDMA PCI function
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
	u32 ret = 0;

	if (!rf->msix_shared) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				  msix_vec->name, rf);
	}
	if (ret) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
		return -EINVAL;
	}

	rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);

	return 0;
}

/**
 * irdma_create_ceq - create completion event queue
 * @rf: RDMA PCI function
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 * @vsi: SC vsi struct
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
			    u32 ceq_id, struct irdma_sc_vsi *vsi)
{
	int status;
	struct irdma_ceq_init_info info = {};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 ceq_size;

	info.ceq_id = ceq_id;
	iwceq->rf = rf;
	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
		       dev->hw_attrs.max_hw_ceq_size);
	iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
				IRDMA_CEQ_ALIGNMENT);
	iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
					   &iwceq->mem.pa, GFP_KERNEL);
	if (!iwceq->mem.va)
		return -ENOMEM;

	info.ceq_id = ceq_id;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;
	info.elem_cnt = ceq_size;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	info.vsi = vsi;
	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
	if (!status) {
		if (dev->ceq_valid)
			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
						   IRDMA_OP_CEQ_CREATE);
		else
			status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
	}

	if (status) {
		dma_free_coherent(dev->hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
		iwceq->mem.va = NULL;
	}

	return status;
}

/**
 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
 * @rf: RDMA PCI function
 *
 * Allocate a list for all device completion event queues
 * Create the ceq 0 and configure its msix interrupt vector
 * Return 0, if successfully set up, otherwise return error
 */
static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	u32 i;
	int status = 0;
	u32 num_ceqs;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
	if (!rf->ceqlist) {
		status = -ENOMEM;
		goto exit;
	}

	iwceq = &rf->ceqlist[0];
	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
	if (status) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
			  status);
		goto exit;
	}

	spin_lock_init(&iwceq->ce_lock);
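	/* CEQ 0 shares vector 0 with the AEQ when msix_shared is set */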
	i = rf->msix_shared ? 0 : 1;
	msix_vec = &rf->iw_msixtbl[i];
	iwceq->irq = msix_vec->irq;
	iwceq->msix_idx = msix_vec->idx;
	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
	if (status) {
		irdma_destroy_ceq(rf, iwceq);
		goto exit;
	}

	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
	rf->ceqs_count++;

exit:
	if (status && !rf->ceqs_count) {
		kfree(rf->ceqlist);
		rf->ceqlist = NULL;
		return status;
	}
	rf->sc_dev.ceq_valid = true;

	return 0;
}

/**
 * irdma_setup_ceqs - manage the device ceq's and their interrupt resources
 * @rf: RDMA PCI function
 * @vsi: VSI structure for this CEQ
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if ceqs are successfully set up, otherwise return error
 */
static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
	u32 i;
	u32 ceq_id;
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	int status;
	u32 num_ceqs;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
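	/* CEQs after CEQ 0 start at vector 1 (shared AEQ/CEQ 0) or vector 2 */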
	i = (rf->msix_shared) ? 1 : 2;
	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
		iwceq = &rf->ceqlist[ceq_id];
		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
		if (status) {
			ibdev_dbg(&rf->iwdev->ibdev,
				  "ERR: create ceq status = %d\n", status);
			goto del_ceqs;
		}
		spin_lock_init(&iwceq->ce_lock);
		msix_vec = &rf->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
		if (status) {
			irdma_destroy_ceq(rf, iwceq);
			goto del_ceqs;
		}
		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
		rf->ceqs_count++;
	}

	return 0;

del_ceqs:
	irdma_del_ceqs(rf);

	return status;
}

static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
{
	struct irdma_aeq *aeq = &rf->aeq;
	dma_addr_t *pg_arr;
	u32 pg_cnt;
	int status;

	if (rf->rdma_ver < IRDMA_GEN_2)
		return -EOPNOTSUPP;

	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
	aeq->mem.va = vzalloc(aeq->mem.size);

	if (!aeq->mem.va)
		return -ENOMEM;

	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
	if (status) {
		vfree(aeq->mem.va);
		return status;
	}

	pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
	if (status) {
		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
		vfree(aeq->mem.va);
		return status;
	}

	return 0;
}

/**
 * irdma_create_aeq - create async event queue
 * @rf: RDMA PCI function
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_aeq(struct irdma_pci_f *rf)
{
	struct irdma_aeq_init_info info = {};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
	u32 aeq_size;
	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
	int status;

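	/* size the AEQ for every QP (doubled in iWARP mode) plus every CQ,
	 * capped at the HW maximum
	 */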
1389 | aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt + |
1390 | hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; |
1391 | aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); |
1392 | |
1393 | aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size, |
1394 | IRDMA_AEQ_ALIGNMENT); |
1395 | aeq->mem.va = dma_alloc_coherent(dev: dev->hw->device, size: aeq->mem.size, |
1396 | dma_handle: &aeq->mem.pa, |
1397 | GFP_KERNEL | __GFP_NOWARN); |
1398 | if (aeq->mem.va) |
1399 | goto skip_virt_aeq; |
1400 | |
1401 | /* physically mapped aeq failed. setup virtual aeq */ |
1402 | status = irdma_create_virt_aeq(rf, size: aeq_size); |
1403 | if (status) |
1404 | return status; |
1405 | |
1406 | info.virtual_map = true; |
1407 | aeq->virtual_map = info.virtual_map; |
1408 | info.pbl_chunk_size = 1; |
1409 | info.first_pm_pbl_idx = aeq->palloc.level1.idx; |
1410 | |
1411 | skip_virt_aeq: |
1412 | info.aeqe_base = aeq->mem.va; |
1413 | info.aeq_elem_pa = aeq->mem.pa; |
1414 | info.elem_cnt = aeq_size; |
1415 | info.dev = dev; |
1416 | info.msix_idx = rf->iw_msixtbl->idx; |
1417 | status = irdma_sc_aeq_init(aeq: &aeq->sc_aeq, info: &info); |
1418 | if (status) |
1419 | goto err; |
1420 | |
1421 | status = irdma_cqp_aeq_cmd(dev, sc_aeq: &aeq->sc_aeq, op: IRDMA_OP_AEQ_CREATE); |
1422 | if (status) |
1423 | goto err; |
1424 | |
1425 | return 0; |
1426 | |
1427 | err: |
1428 | if (aeq->virtual_map) { |
1429 | irdma_destroy_virt_aeq(rf); |
1430 | } else { |
1431 | dma_free_coherent(dev: dev->hw->device, size: aeq->mem.size, cpu_addr: aeq->mem.va, |
1432 | dma_handle: aeq->mem.pa); |
1433 | aeq->mem.va = NULL; |
1434 | } |
1435 | |
1436 | return status; |
1437 | } |
1438 | |
1439 | /** |
1440 | * irdma_setup_aeq - set up the device aeq |
1441 | * @rf: RDMA PCI function |
1442 | * |
1443 | * Create the aeq and configure its msix interrupt vector |
1444 | * Return 0 if successful, otherwise return error |
1445 | */ |
1446 | static int irdma_setup_aeq(struct irdma_pci_f *rf) |
1447 | { |
1448 | struct irdma_sc_dev *dev = &rf->sc_dev; |
1449 | int status; |
1450 | |
1451 | status = irdma_create_aeq(rf); |
1452 | if (status) |
1453 | return status; |
1454 | |
1455 | status = irdma_cfg_aeq_vector(rf); |
1456 | if (status) { |
1457 | irdma_destroy_aeq(rf); |
1458 | return status; |
1459 | } |
1460 | |
1461 | if (!rf->msix_shared) |
1462 | irdma_ena_intr(dev, msix_id: rf->iw_msixtbl[0].idx); |
1463 | |
1464 | return 0; |
1465 | } |
1466 | |
1467 | /** |
1468 | * irdma_initialize_ilq - create iwarp local queue for cm |
1469 | * @iwdev: irdma device |
1470 | * |
1471 | * Return 0 if successful, otherwise return error |
1472 | */ |
1473 | static int irdma_initialize_ilq(struct irdma_device *iwdev) |
1474 | { |
1475 | struct irdma_puda_rsrc_info info = {}; |
1476 | int status; |
1477 | |
1478 | info.type = IRDMA_PUDA_RSRC_TYPE_ILQ; |
1479 | info.cq_id = 1; |
1480 | info.qp_id = 1; |
1481 | info.count = 1; |
1482 | info.pd_id = 1; |
1483 | info.abi_ver = IRDMA_ABI_VER; |
1484 | info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); |
1485 | info.rq_size = info.sq_size; |
1486 | info.buf_size = 1024; |
1487 | info.tx_buf_cnt = 2 * info.sq_size; |
1488 | info.receive = irdma_receive_ilq; |
1489 | info.xmit_complete = irdma_free_sqbuf; |
1490 | status = irdma_puda_create_rsrc(vsi: &iwdev->vsi, info: &info); |
1491 | if (status) |
1492 | ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n" ); |
1493 | |
1494 | return status; |
1495 | } |
1496 | |
1497 | /** |
1498 | * irdma_initialize_ieq - create iwarp exception queue |
1499 | * @iwdev: irdma device |
1500 | * |
1501 | * Return 0 if successful, otherwise return error |
1502 | */ |
1503 | static int irdma_initialize_ieq(struct irdma_device *iwdev) |
1504 | { |
1505 | struct irdma_puda_rsrc_info info = {}; |
1506 | int status; |
1507 | |
1508 | info.type = IRDMA_PUDA_RSRC_TYPE_IEQ; |
1509 | info.cq_id = 2; |
1510 | info.qp_id = iwdev->vsi.exception_lan_q; |
1511 | info.count = 1; |
1512 | info.pd_id = 2; |
1513 | info.abi_ver = IRDMA_ABI_VER; |
1514 | info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); |
1515 | info.rq_size = info.sq_size; |
1516 | info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD; |
1517 | info.tx_buf_cnt = 4096; |
1518 | status = irdma_puda_create_rsrc(vsi: &iwdev->vsi, info: &info); |
1519 | if (status) |
1520 | ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n" ); |
1521 | |
1522 | return status; |
1523 | } |
1524 | |
1525 | /** |
1526 | * irdma_reinitialize_ieq - destroy and re-create ieq |
1527 | * @vsi: VSI structure |
1528 | */ |
1529 | void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi) |
1530 | { |
1531 | struct irdma_device *iwdev = vsi->back_vsi; |
1532 | struct irdma_pci_f *rf = iwdev->rf; |
1533 | |
1534 | irdma_puda_dele_rsrc(vsi, type: IRDMA_PUDA_RSRC_TYPE_IEQ, reset: false); |
1535 | if (irdma_initialize_ieq(iwdev)) { |
1536 | iwdev->rf->reset = true; |
1537 | rf->gen_ops.request_reset(rf); |
1538 | } |
1539 | } |
1540 | |
1541 | /** |
1542 | * irdma_hmc_setup - create hmc objects for the device |
1543 | * @rf: RDMA PCI function |
1544 | * |
1545 | * Set up the device private memory space for the number and size of |
1546 | * the hmc objects and create the objects |
1547 | * Return 0 if successful, otherwise return error |
1548 | */ |
1549 | static int irdma_hmc_setup(struct irdma_pci_f *rf) |
1550 | { |
1551 | int status; |
1552 | u32 qpcnt; |
1553 | |
1554 | qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; |
1555 | |
1556 | rf->sd_type = IRDMA_SD_TYPE_DIRECT; |
1557 | status = irdma_cfg_fpm_val(dev: &rf->sc_dev, qp_count: qpcnt); |
1558 | if (status) |
1559 | return status; |
1560 | |
1561 | status = irdma_create_hmc_objs(rf, privileged: true, vers: rf->rdma_ver); |
1562 | |
1563 | return status; |
1564 | } |
1565 | |
1566 | /** |
1567 | * irdma_del_init_mem - deallocate memory resources |
1568 | * @rf: RDMA PCI function |
1569 | */ |
1570 | static void irdma_del_init_mem(struct irdma_pci_f *rf) |
1571 | { |
1572 | struct irdma_sc_dev *dev = &rf->sc_dev; |
1573 | |
1574 | kfree(objp: dev->hmc_info->sd_table.sd_entry); |
1575 | dev->hmc_info->sd_table.sd_entry = NULL; |
1576 | vfree(addr: rf->mem_rsrc); |
1577 | rf->mem_rsrc = NULL; |
1578 | dma_free_coherent(dev: rf->hw.device, size: rf->obj_mem.size, cpu_addr: rf->obj_mem.va, |
1579 | dma_handle: rf->obj_mem.pa); |
1580 | rf->obj_mem.va = NULL; |
1581 | if (rf->rdma_ver != IRDMA_GEN_1) { |
1582 | bitmap_free(bitmap: rf->allocated_ws_nodes); |
1583 | rf->allocated_ws_nodes = NULL; |
1584 | } |
1585 | kfree(objp: rf->ceqlist); |
1586 | rf->ceqlist = NULL; |
1587 | kfree(objp: rf->iw_msixtbl); |
1588 | rf->iw_msixtbl = NULL; |
1589 | kfree(objp: rf->hmc_info_mem); |
1590 | rf->hmc_info_mem = NULL; |
1591 | } |
1592 | |
1593 | /** |
1594 | * irdma_initialize_dev - initialize device |
1595 | * @rf: RDMA PCI function |
1596 | * |
1597 | * Allocate memory for the hmc objects and initialize iwdev |
1598 | * Return 0 if successful, otherwise clean up the resources |
1599 | * and return error |
1600 | */ |
1601 | static int irdma_initialize_dev(struct irdma_pci_f *rf) |
1602 | { |
1603 | int status; |
1604 | struct irdma_sc_dev *dev = &rf->sc_dev; |
1605 | struct irdma_device_init_info info = {}; |
1606 | struct irdma_dma_mem mem; |
1607 | u32 size; |
1608 | |
1609 | size = sizeof(struct irdma_hmc_pble_rsrc) + |
1610 | sizeof(struct irdma_hmc_info) + |
1611 | (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX); |
1612 | |
1613 | rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); |
1614 | if (!rf->hmc_info_mem) |
1615 | return -ENOMEM; |
1616 | |
1617 | rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; |
1618 | dev->hmc_info = &rf->hw.hmc; |
1619 | dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *) |
1620 | (rf->pble_rsrc + 1); |
1621 | |
1622 | status = irdma_obj_aligned_mem(rf, memptr: &mem, size: IRDMA_QUERY_FPM_BUF_SIZE, |
1623 | mask: IRDMA_FPM_QUERY_BUF_ALIGNMENT_M); |
1624 | if (status) |
1625 | goto error; |
1626 | |
1627 | info.fpm_query_buf_pa = mem.pa; |
1628 | info.fpm_query_buf = mem.va; |
1629 | |
1630 | status = irdma_obj_aligned_mem(rf, memptr: &mem, size: IRDMA_COMMIT_FPM_BUF_SIZE, |
1631 | mask: IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M); |
1632 | if (status) |
1633 | goto error; |
1634 | |
1635 | info.fpm_commit_buf_pa = mem.pa; |
1636 | info.fpm_commit_buf = mem.va; |
1637 | |
1638 | info.bar0 = rf->hw.hw_addr; |
1639 | info.hmc_fn_id = rf->pf_id; |
1640 | info.hw = &rf->hw; |
1641 | status = irdma_sc_dev_init(ver: rf->rdma_ver, dev: &rf->sc_dev, info: &info); |
1642 | if (status) |
1643 | goto error; |
1644 | |
1645 | return status; |
1646 | error: |
	kfree(rf->hmc_info_mem);
1648 | rf->hmc_info_mem = NULL; |
1649 | |
1650 | return status; |
1651 | } |
1652 | |
1653 | /** |
1654 | * irdma_rt_deinit_hw - clean up the irdma device resources |
1655 | * @iwdev: irdma device |
1656 | * |
1657 | * remove the mac ip entry and ipv4/ipv6 addresses, destroy the |
1658 | * device queues and free the pble and the hmc objects |
1659 | */ |
1660 | void irdma_rt_deinit_hw(struct irdma_device *iwdev) |
1661 | { |
	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1663 | |
1664 | switch (iwdev->init_state) { |
1665 | case IP_ADDR_REGISTERED: |
1666 | if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) |
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
1669 | fallthrough; |
1670 | case AEQ_CREATED: |
1671 | case PBLE_CHUNK_MEM: |
1672 | case CEQS_CREATED: |
1673 | case IEQ_CREATED: |
1674 | if (!iwdev->roce_mode) |
			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
					     iwdev->rf->reset);
1677 | fallthrough; |
1678 | case ILQ_CREATED: |
1679 | if (!iwdev->roce_mode) |
			irdma_puda_dele_rsrc(&iwdev->vsi,
					     IRDMA_PUDA_RSRC_TYPE_ILQ,
					     iwdev->rf->reset);
1683 | break; |
1684 | default: |
		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1686 | break; |
1687 | } |
1688 | |
	irdma_cleanup_cm_core(&iwdev->cm_core);
	if (iwdev->vsi.pestat) {
		irdma_vsi_stats_free(&iwdev->vsi);
		kfree(iwdev->vsi.pestat);
	}
	if (iwdev->cleanup_wq)
		destroy_workqueue(iwdev->cleanup_wq);
1696 | } |
1697 | |
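/**
 * irdma_setup_init_state - set up initial device state
 * @rf: RDMA PCI function
 *
 * Save the MSI-X information, allocate the shared object memory
 * region and initialize the static device structure.
 * Return 0 if successful, otherwise return error
 */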
1698 | static int irdma_setup_init_state(struct irdma_pci_f *rf) |
1699 | { |
1700 | int status; |
1701 | |
1702 | status = irdma_save_msix_info(rf); |
1703 | if (status) |
1704 | return status; |
1705 | |
1706 | rf->hw.device = &rf->pcidev->dev; |
1707 | rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE); |
	rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
					    &rf->obj_mem.pa, GFP_KERNEL);
1710 | if (!rf->obj_mem.va) { |
1711 | status = -ENOMEM; |
1712 | goto clean_msixtbl; |
1713 | } |
1714 | |
1715 | rf->obj_next = rf->obj_mem; |
1716 | status = irdma_initialize_dev(rf); |
1717 | if (status) |
1718 | goto clean_obj_mem; |
1719 | |
1720 | return 0; |
1721 | |
1722 | clean_obj_mem: |
	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
			  rf->obj_mem.pa);
1725 | rf->obj_mem.va = NULL; |
1726 | clean_msixtbl: |
	kfree(rf->iw_msixtbl);
1728 | rf->iw_msixtbl = NULL; |
1729 | return status; |
1730 | } |
1731 | |
1732 | /** |
1733 | * irdma_get_used_rsrc - determine resources used internally |
1734 | * @iwdev: irdma device |
1735 | * |
1736 | * Called at the end of open to get all internal allocations |
1737 | */ |
1738 | static void irdma_get_used_rsrc(struct irdma_device *iwdev) |
1739 | { |
	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
						  iwdev->rf->max_pd);
	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
						  iwdev->rf->max_qp);
	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
						  iwdev->rf->max_cq);
	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
						  iwdev->rf->max_mr);
1748 | } |
1749 | |
1750 | void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) |
1751 | { |
1752 | enum init_completion_state state = rf->init_state; |
1753 | |
1754 | rf->init_state = INVALID_STATE; |
1755 | if (rf->rsrc_created) { |
1756 | irdma_destroy_aeq(rf); |
		irdma_destroy_pble_prm(rf->pble_rsrc);
1758 | irdma_del_ceqs(rf); |
1759 | rf->rsrc_created = false; |
1760 | } |
1761 | switch (state) { |
1762 | case CEQ0_CREATED: |
1763 | irdma_del_ceq_0(rf); |
1764 | fallthrough; |
1765 | case CCQ_CREATED: |
1766 | irdma_destroy_ccq(rf); |
1767 | fallthrough; |
1768 | case HW_RSRC_INITIALIZED: |
1769 | case HMC_OBJS_CREATED: |
		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
				      rf->reset, rf->rdma_ver);
1772 | fallthrough; |
1773 | case CQP_CREATED: |
1774 | irdma_destroy_cqp(rf); |
1775 | fallthrough; |
1776 | case INITIAL_STATE: |
1777 | irdma_del_init_mem(rf); |
1778 | break; |
1779 | case INVALID_STATE: |
1780 | default: |
		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1782 | break; |
1783 | } |
1784 | } |
1785 | |
1786 | /** |
1787 | * irdma_rt_init_hw - Initializes runtime portion of HW |
1788 | * @iwdev: irdma device |
1789 | * @l2params: qos, tc, mtu info from netdev driver |
1790 | * |
1791 | * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma |
1792 | * device resource objects. |
1793 | */ |
1794 | int irdma_rt_init_hw(struct irdma_device *iwdev, |
1795 | struct irdma_l2params *l2params) |
1796 | { |
1797 | struct irdma_pci_f *rf = iwdev->rf; |
1798 | struct irdma_sc_dev *dev = &rf->sc_dev; |
1799 | struct irdma_vsi_init_info vsi_info = {}; |
1800 | struct irdma_vsi_stats_info stats_info = {}; |
1801 | int status; |
1802 | |
1803 | vsi_info.dev = dev; |
1804 | vsi_info.back_vsi = iwdev; |
1805 | vsi_info.params = l2params; |
1806 | vsi_info.pf_data_vsi_num = iwdev->vsi_num; |
1807 | vsi_info.register_qset = rf->gen_ops.register_qset; |
1808 | vsi_info.unregister_qset = rf->gen_ops.unregister_qset; |
1809 | vsi_info.exception_lan_q = 2; |
	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1811 | |
	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1813 | if (status) |
1814 | return status; |
1815 | |
	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
	if (!stats_info.pestat) {
		irdma_cleanup_cm_core(&iwdev->cm_core);
		return -ENOMEM;
	}
	stats_info.fcn_id = dev->hmc_fn_id;
	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
	if (status) {
		irdma_cleanup_cm_core(&iwdev->cm_core);
		kfree(stats_info.pestat);
1826 | return status; |
1827 | } |
1828 | |
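	/* The do/while(0) below is only a break-out ladder: each init step
	 * records its progress in iwdev->init_state, and a failed step
	 * breaks out to the common error path, which unwinds via
	 * irdma_rt_deinit_hw() based on the last state reached.
	 */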
1829 | do { |
1830 | if (!iwdev->roce_mode) { |
1831 | status = irdma_initialize_ilq(iwdev); |
1832 | if (status) |
1833 | break; |
1834 | iwdev->init_state = ILQ_CREATED; |
1835 | status = irdma_initialize_ieq(iwdev); |
1836 | if (status) |
1837 | break; |
1838 | iwdev->init_state = IEQ_CREATED; |
1839 | } |
1840 | if (!rf->rsrc_created) { |
			status = irdma_setup_ceqs(rf, &iwdev->vsi);
1842 | if (status) |
1843 | break; |
1844 | |
1845 | iwdev->init_state = CEQS_CREATED; |
1846 | |
			status = irdma_hmc_init_pble(&rf->sc_dev,
						     rf->pble_rsrc);
1849 | if (status) { |
1850 | irdma_del_ceqs(rf); |
1851 | break; |
1852 | } |
1853 | |
1854 | iwdev->init_state = PBLE_CHUNK_MEM; |
1855 | |
1856 | status = irdma_setup_aeq(rf); |
1857 | if (status) { |
				irdma_destroy_pble_prm(rf->pble_rsrc);
1859 | irdma_del_ceqs(rf); |
1860 | break; |
1861 | } |
1862 | iwdev->init_state = AEQ_CREATED; |
1863 | rf->rsrc_created = true; |
1864 | } |
1865 | |
1866 | if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) |
1867 | irdma_alloc_set_mac(iwdev); |
1868 | irdma_add_ip(iwdev); |
1869 | iwdev->init_state = IP_ADDR_REGISTERED; |
1870 | |
		/* Handles asynchronous cleanup tasks: disconnect CM, free qp,
		 * free cq bufs
		 */
		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
						    WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1876 | if (!iwdev->cleanup_wq) |
1877 | return -ENOMEM; |
1878 | irdma_get_used_rsrc(iwdev); |
1879 | init_waitqueue_head(&iwdev->suspend_wq); |
1880 | |
1881 | return 0; |
1882 | } while (0); |
1883 | |
	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
		status, iwdev->init_state);
1886 | irdma_rt_deinit_hw(iwdev); |
1887 | |
1888 | return status; |
1889 | } |
1890 | |
1891 | /** |
1892 | * irdma_ctrl_init_hw - Initializes control portion of HW |
1893 | * @rf: RDMA PCI function |
1894 | * |
 * Create admin queues, HMC objects and RF resource objects
1896 | */ |
1897 | int irdma_ctrl_init_hw(struct irdma_pci_f *rf) |
1898 | { |
1899 | struct irdma_sc_dev *dev = &rf->sc_dev; |
	int status;

	do {
1902 | status = irdma_setup_init_state(rf); |
1903 | if (status) |
1904 | break; |
1905 | rf->init_state = INITIAL_STATE; |
1906 | |
1907 | status = irdma_create_cqp(rf); |
1908 | if (status) |
1909 | break; |
1910 | rf->init_state = CQP_CREATED; |
1911 | |
1912 | status = irdma_hmc_setup(rf); |
1913 | if (status) |
1914 | break; |
1915 | rf->init_state = HMC_OBJS_CREATED; |
1916 | |
1917 | status = irdma_initialize_hw_rsrc(rf); |
1918 | if (status) |
1919 | break; |
1920 | rf->init_state = HW_RSRC_INITIALIZED; |
1921 | |
1922 | status = irdma_create_ccq(rf); |
1923 | if (status) |
1924 | break; |
1925 | rf->init_state = CCQ_CREATED; |
1926 | |
1927 | dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT; |
1928 | if (rf->rdma_ver != IRDMA_GEN_1) { |
1929 | status = irdma_get_rdma_features(dev); |
1930 | if (status) |
1931 | break; |
1932 | } |
1933 | |
1934 | status = irdma_setup_ceq_0(rf); |
1935 | if (status) |
1936 | break; |
1937 | rf->init_state = CEQ0_CREATED; |
1938 | /* Handles processing of CQP completions */ |
1939 | rf->cqp_cmpl_wq = |
1940 | alloc_ordered_workqueue("cqp_cmpl_wq" , WQ_HIGHPRI); |
1941 | if (!rf->cqp_cmpl_wq) { |
1942 | status = -ENOMEM; |
1943 | break; |
1944 | } |
1945 | INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); |
		irdma_sc_ccq_arm(dev->ccq);
1947 | return 0; |
1948 | } while (0); |
1949 | |
	dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
		rf->init_state, status);
1952 | irdma_ctrl_deinit_hw(rf); |
1953 | return status; |
1954 | } |
1955 | |
1956 | /** |
1957 | * irdma_set_hw_rsrc - set hw memory resources. |
1958 | * @rf: RDMA PCI function |
1959 | */ |
1960 | static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) |
1961 | { |
1962 | rf->allocated_qps = (void *)(rf->mem_rsrc + |
1963 | (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); |
1964 | rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; |
1965 | rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; |
1966 | rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; |
1967 | rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; |
1968 | rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; |
1969 | rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; |
1970 | rf->qp_table = (struct irdma_qp **) |
1971 | (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); |
1972 | rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); |
1973 | |
1974 | spin_lock_init(&rf->rsrc_lock); |
1975 | spin_lock_init(&rf->arp_lock); |
1976 | spin_lock_init(&rf->qptable_lock); |
1977 | spin_lock_init(&rf->cqtable_lock); |
1978 | spin_lock_init(&rf->qh_list_lock); |
1979 | } |
1980 | |
1981 | /** |
1982 | * irdma_calc_mem_rsrc_size - calculate memory resources size. |
1983 | * @rf: RDMA PCI function |
1984 | */ |
1985 | static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf) |
1986 | { |
1987 | u32 rsrc_size; |
1988 | |
1989 | rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; |
1990 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); |
1991 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); |
1992 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); |
1993 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); |
1994 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); |
1995 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); |
1996 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); |
1997 | rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; |
1998 | rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq; |
1999 | |
2000 | return rsrc_size; |
2001 | } |
2002 | |
2003 | /** |
2004 | * irdma_initialize_hw_rsrc - initialize hw resource tracking array |
2005 | * @rf: RDMA PCI function |
2006 | */ |
2007 | u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) |
2008 | { |
2009 | u32 rsrc_size; |
2010 | u32 mrdrvbits; |
2011 | u32 ret; |
2012 | |
2013 | if (rf->rdma_ver != IRDMA_GEN_1) { |
2014 | rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES, |
2015 | GFP_KERNEL); |
2016 | if (!rf->allocated_ws_nodes) |
2017 | return -ENOMEM; |
2018 | |
		set_bit(0, rf->allocated_ws_nodes);
2020 | rf->max_ws_node_id = IRDMA_MAX_WS_NODES; |
2021 | } |
2022 | rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; |
2023 | rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; |
2024 | rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; |
2025 | rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; |
2026 | rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; |
2027 | rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; |
2028 | rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; |
2029 | rf->max_mcg = rf->max_qp; |
2030 | |
2031 | rsrc_size = irdma_calc_mem_rsrc_size(rf); |
	rf->mem_rsrc = vzalloc(rsrc_size);
2033 | if (!rf->mem_rsrc) { |
2034 | ret = -ENOMEM; |
2035 | goto mem_rsrc_vzalloc_fail; |
2036 | } |
2037 | |
2038 | rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; |
2039 | |
2040 | irdma_set_hw_rsrc(rf); |
2041 | |
	set_bit(0, rf->allocated_mrs);
	set_bit(0, rf->allocated_qps);
	set_bit(0, rf->allocated_cqs);
	set_bit(0, rf->allocated_pds);
	set_bit(0, rf->allocated_arps);
	set_bit(0, rf->allocated_ahs);
	set_bit(0, rf->allocated_mcgs);
	set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
	set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
	set_bit(1, rf->allocated_cqs);
	set_bit(1, rf->allocated_pds);
	set_bit(2, rf->allocated_cqs);
	set_bit(2, rf->allocated_pds);

	INIT_LIST_HEAD(&rf->mc_qht_list.list);
2057 | /* stag index mask has a minimum of 14 bits */ |
2058 | mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); |
2059 | rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); |
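	/* Worked example (hypothetical sizing): with max_mr = SZ_64K,
	 * get_count_order() is 16, so mrdrvbits = 24 - 16 = 8. The driver
	 * then owns the top 8 stag bits and
	 * mr_stagmask = ~(0xff << 24) = 0x00ffffff.
	 */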
2060 | |
2061 | return 0; |
2062 | |
2063 | mem_rsrc_vzalloc_fail: |
	bitmap_free(rf->allocated_ws_nodes);
2065 | rf->allocated_ws_nodes = NULL; |
2066 | |
2067 | return ret; |
2068 | } |
2069 | |
2070 | /** |
2071 | * irdma_cqp_ce_handler - handle cqp completions |
2072 | * @rf: RDMA PCI function |
2073 | * @cq: cq for cqp completions |
2074 | */ |
2075 | void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) |
2076 | { |
2077 | struct irdma_cqp_request *cqp_request; |
2078 | struct irdma_sc_dev *dev = &rf->sc_dev; |
2079 | u32 cqe_count = 0; |
2080 | struct irdma_ccq_cqe_info info; |
2081 | unsigned long flags; |
2082 | int ret; |
2083 | |
2084 | do { |
2085 | memset(&info, 0, sizeof(info)); |
2086 | spin_lock_irqsave(&rf->cqp.compl_lock, flags); |
		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2089 | if (ret) |
2090 | break; |
2091 | |
2092 | cqp_request = (struct irdma_cqp_request *) |
2093 | (unsigned long)info.scratch; |
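		/* The scratch value posted with the CQP WQE round-trips
		 * through hardware, carrying the originating cqp_request
		 * pointer back to us in the completion.
		 */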
		if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
						     info.maj_err_code,
						     info.min_err_code))
			ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				  info.op_code, info.maj_err_code, info.min_err_code);
2099 | if (cqp_request) { |
2100 | cqp_request->compl_info.maj_err_code = info.maj_err_code; |
2101 | cqp_request->compl_info.min_err_code = info.min_err_code; |
2102 | cqp_request->compl_info.op_ret_val = info.op_ret_val; |
2103 | cqp_request->compl_info.error = info.error; |
2104 | |
			if (cqp_request->waiting) {
				WRITE_ONCE(cqp_request->request_done, true);
				wake_up(&cqp_request->waitq);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			}
2114 | } |
2115 | |
2116 | cqe_count++; |
2117 | } while (1); |
2118 | |
2119 | if (cqe_count) { |
2120 | irdma_process_bh(dev); |
		irdma_sc_ccq_arm(cq);
2122 | } |
2123 | } |
2124 | |
2125 | /** |
2126 | * cqp_compl_worker - Handle cqp completions |
2127 | * @work: Pointer to work structure |
2128 | */ |
2129 | void cqp_compl_worker(struct work_struct *work) |
2130 | { |
2131 | struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, |
2132 | cqp_cmpl_work); |
2133 | struct irdma_sc_cq *cq = &rf->ccq.sc_cq; |
2134 | |
2135 | irdma_cqp_ce_handler(rf, cq); |
2136 | } |
2137 | |
2138 | /** |
2139 | * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port |
2140 | * @cm_core: cm's core |
2141 | * @port: port to identify apbvt entry |
2142 | */ |
2143 | static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core, |
2144 | u16 port) |
2145 | { |
2146 | struct irdma_apbvt_entry *entry; |
2147 | |
2148 | hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) { |
2149 | if (entry->port == port) { |
2150 | entry->use_cnt++; |
2151 | return entry; |
2152 | } |
2153 | } |
2154 | |
2155 | return NULL; |
2156 | } |
2157 | |
2158 | /** |
2159 | * irdma_next_iw_state - modify qp state |
2160 | * @iwqp: iwarp qp to modify |
2161 | * @state: next state for qp |
2162 | * @del_hash: del hash |
2163 | * @term: term message |
2164 | * @termlen: length of term message |
2165 | */ |
2166 | void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term, |
2167 | u8 termlen) |
2168 | { |
2169 | struct irdma_modify_qp_info info = {}; |
2170 | |
2171 | info.next_iwarp_state = state; |
2172 | info.remove_hash_idx = del_hash; |
2173 | info.cq_num_valid = true; |
2174 | info.arp_cache_idx_valid = true; |
2175 | info.dont_send_term = true; |
2176 | info.dont_send_fin = true; |
2177 | info.termlen = termlen; |
2178 | |
2179 | if (term & IRDMAQP_TERM_SEND_TERM_ONLY) |
2180 | info.dont_send_term = false; |
2181 | if (term & IRDMAQP_TERM_SEND_FIN_ONLY) |
2182 | info.dont_send_fin = false; |
2183 | if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR) |
2184 | info.reset_tcp_conn = true; |
2185 | iwqp->hw_iwarp_state = state; |
	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2187 | iwqp->iwarp_state = info.next_iwarp_state; |
2188 | } |
2189 | |
2190 | /** |
2191 | * irdma_del_local_mac_entry - remove a mac entry from the hw |
2192 | * table |
2193 | * @rf: RDMA PCI function |
2194 | * @idx: the index of the mac ip address to delete |
2195 | */ |
2196 | void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) |
2197 | { |
2198 | struct irdma_cqp *iwcqp = &rf->cqp; |
2199 | struct irdma_cqp_request *cqp_request; |
2200 | struct cqp_cmds_info *cqp_info; |
2201 | |
	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2203 | if (!cqp_request) |
2204 | return; |
2205 | |
2206 | cqp_info = &cqp_request->info; |
2207 | cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY; |
2208 | cqp_info->post_sq = 1; |
2209 | cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp; |
2210 | cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request; |
2211 | cqp_info->in.u.del_local_mac_entry.entry_idx = idx; |
2212 | cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0; |
2213 | |
2214 | irdma_handle_cqp_op(rf, cqp_request); |
	irdma_put_cqp_request(iwcqp, cqp_request);
2216 | } |
2217 | |
2218 | /** |
2219 | * irdma_add_local_mac_entry - add a mac ip address entry to the |
2220 | * hw table |
2221 | * @rf: RDMA PCI function |
2222 | * @mac_addr: pointer to mac address |
2223 | * @idx: the index of the mac ip address to add |
2224 | */ |
2225 | int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) |
2226 | { |
2227 | struct irdma_local_mac_entry_info *info; |
2228 | struct irdma_cqp *iwcqp = &rf->cqp; |
2229 | struct irdma_cqp_request *cqp_request; |
2230 | struct cqp_cmds_info *cqp_info; |
2231 | int status; |
2232 | |
	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2234 | if (!cqp_request) |
2235 | return -ENOMEM; |
2236 | |
2237 | cqp_info = &cqp_request->info; |
2238 | cqp_info->post_sq = 1; |
2239 | info = &cqp_info->in.u.add_local_mac_entry.info; |
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2246 | |
2247 | status = irdma_handle_cqp_op(rf, cqp_request); |
	irdma_put_cqp_request(iwcqp, cqp_request);
2249 | |
2250 | return status; |
2251 | } |
2252 | |
2253 | /** |
2254 | * irdma_alloc_local_mac_entry - allocate a mac entry |
2255 | * @rf: RDMA PCI function |
2256 | * @mac_tbl_idx: the index of the new mac address |
2257 | * |
2258 | * Allocate a mac address entry and update the mac_tbl_idx |
2259 | * to hold the index of the newly created mac address |
2260 | * Return 0 if successful, otherwise return error |
2261 | */ |
2262 | int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) |
2263 | { |
2264 | struct irdma_cqp *iwcqp = &rf->cqp; |
2265 | struct irdma_cqp_request *cqp_request; |
2266 | struct cqp_cmds_info *cqp_info; |
2267 | int status = 0; |
2268 | |
	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2270 | if (!cqp_request) |
2271 | return -ENOMEM; |
2272 | |
2273 | cqp_info = &cqp_request->info; |
2274 | cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY; |
2275 | cqp_info->post_sq = 1; |
2276 | cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp; |
2277 | cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request; |
	status = irdma_handle_cqp_op(rf, cqp_request);
2279 | if (!status) |
2280 | *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val; |
2281 | |
	irdma_put_cqp_request(iwcqp, cqp_request);
2283 | |
2284 | return status; |
2285 | } |
2286 | |
2287 | /** |
2288 | * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt |
2289 | * @iwdev: irdma device |
2290 | * @accel_local_port: port for apbvt |
 * @add_port: true to add the port, false to delete it
2292 | */ |
2293 | static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, |
2294 | u16 accel_local_port, bool add_port) |
2295 | { |
2296 | struct irdma_apbvt_info *info; |
2297 | struct irdma_cqp_request *cqp_request; |
2298 | struct cqp_cmds_info *cqp_info; |
2299 | int status; |
2300 | |
	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2302 | if (!cqp_request) |
2303 | return -ENOMEM; |
2304 | |
2305 | cqp_info = &cqp_request->info; |
2306 | info = &cqp_info->in.u.manage_apbvt_entry.info; |
2307 | memset(info, 0, sizeof(*info)); |
2308 | info->add = add_port; |
2309 | info->port = accel_local_port; |
2310 | cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY; |
2311 | cqp_info->post_sq = 1; |
2312 | cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; |
2313 | cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request; |
	ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
		  (!add_port) ? "DELETE" : "ADD", accel_local_port);
2316 | |
	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2319 | |
2320 | return status; |
2321 | } |
2322 | |
2323 | /** |
2324 | * irdma_add_apbvt - add tcp port to HW apbvt table |
2325 | * @iwdev: irdma device |
2326 | * @port: port for apbvt |
2327 | */ |
2328 | struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port) |
2329 | { |
2330 | struct irdma_cm_core *cm_core = &iwdev->cm_core; |
2331 | struct irdma_apbvt_entry *entry; |
2332 | unsigned long flags; |
2333 | |
2334 | spin_lock_irqsave(&cm_core->apbvt_lock, flags); |
2335 | entry = irdma_lookup_apbvt_entry(cm_core, port); |
2336 | if (entry) { |
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2338 | return entry; |
2339 | } |
2340 | |
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2344 | return NULL; |
2345 | } |
2346 | |
2347 | entry->port = port; |
2348 | entry->use_cnt = 1; |
2349 | hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port); |
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2351 | |
	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
		kfree(entry);
2354 | return NULL; |
2355 | } |
2356 | |
2357 | return entry; |
2358 | } |
2359 | |
2360 | /** |
2361 | * irdma_del_apbvt - delete tcp port from HW apbvt table |
2362 | * @iwdev: irdma device |
2363 | * @entry: apbvt entry object |
2364 | */ |
2365 | void irdma_del_apbvt(struct irdma_device *iwdev, |
2366 | struct irdma_apbvt_entry *entry) |
2367 | { |
2368 | struct irdma_cm_core *cm_core = &iwdev->cm_core; |
2369 | unsigned long flags; |
2370 | |
2371 | spin_lock_irqsave(&cm_core->apbvt_lock, flags); |
2372 | if (--entry->use_cnt) { |
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2374 | return; |
2375 | } |
2376 | |
	hash_del(&entry->hlist);
2378 | /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to |
2379 | * protect against race where add APBVT CQP can race ahead of the delete |
2380 | * APBVT for same port. |
2381 | */ |
	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
	kfree(entry);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2385 | } |
2386 | |
2387 | /** |
2388 | * irdma_manage_arp_cache - manage hw arp cache |
2389 | * @rf: RDMA PCI function |
2390 | * @mac_addr: mac address ptr |
2391 | * @ip_addr: ip addr for arp cache |
 * @ipv4: flag indicating IPv4
2393 | * @action: add, delete or modify |
2394 | */ |
2395 | void irdma_manage_arp_cache(struct irdma_pci_f *rf, |
2396 | const unsigned char *mac_addr, |
2397 | u32 *ip_addr, bool ipv4, u32 action) |
2398 | { |
2399 | struct irdma_add_arp_cache_entry_info *info; |
2400 | struct irdma_cqp_request *cqp_request; |
2401 | struct cqp_cmds_info *cqp_info; |
2402 | int arp_index; |
2403 | |
2404 | arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action); |
2405 | if (arp_index == -1) |
2406 | return; |
2407 | |
	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2409 | if (!cqp_request) |
2410 | return; |
2411 | |
2412 | cqp_info = &cqp_request->info; |
2413 | if (action == IRDMA_ARP_ADD) { |
2414 | cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY; |
2415 | info = &cqp_info->in.u.add_arp_cache_entry.info; |
2416 | memset(info, 0, sizeof(*info)); |
2417 | info->arp_index = (u16)arp_index; |
2418 | info->permanent = true; |
		ether_addr_copy(info->mac_addr, mac_addr);
2420 | cqp_info->in.u.add_arp_cache_entry.scratch = |
2421 | (uintptr_t)cqp_request; |
2422 | cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; |
2423 | } else { |
2424 | cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY; |
2425 | cqp_info->in.u.del_arp_cache_entry.scratch = |
2426 | (uintptr_t)cqp_request; |
2427 | cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; |
2428 | cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index; |
2429 | } |
2430 | |
2431 | cqp_info->post_sq = 1; |
2432 | irdma_handle_cqp_op(rf, cqp_request); |
	irdma_put_cqp_request(&rf->cqp, cqp_request);
2434 | } |
2435 | |
2436 | /** |
2437 | * irdma_send_syn_cqp_callback - do syn/ack after qhash |
2438 | * @cqp_request: qhash cqp completion |
2439 | */ |
2440 | static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request) |
2441 | { |
2442 | struct irdma_cm_node *cm_node = cqp_request->param; |
2443 | |
	irdma_send_syn(cm_node, 1);
2445 | irdma_rem_ref_cm_node(cm_node); |
2446 | } |
2447 | |
2448 | /** |
2449 | * irdma_manage_qhash - add or modify qhash |
2450 | * @iwdev: irdma device |
2451 | * @cminfo: cm info for qhash |
2452 | * @etype: type (syn or quad) |
2453 | * @mtype: type of qhash |
2454 | * @cmnode: cmnode associated with connection |
2455 | * @wait: wait for completion |
2456 | */ |
2457 | int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, |
2458 | enum irdma_quad_entry_type etype, |
2459 | enum irdma_quad_hash_manage_type mtype, void *cmnode, |
2460 | bool wait) |
2461 | { |
2462 | struct irdma_qhash_table_info *info; |
2463 | struct irdma_cqp *iwcqp = &iwdev->rf->cqp; |
2464 | struct irdma_cqp_request *cqp_request; |
2465 | struct cqp_cmds_info *cqp_info; |
2466 | struct irdma_cm_node *cm_node = cmnode; |
2467 | int status; |
2468 | |
	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2470 | if (!cqp_request) |
2471 | return -ENOMEM; |
2472 | |
2473 | cqp_info = &cqp_request->info; |
2474 | info = &cqp_info->in.u.manage_qhash_table_entry.info; |
2475 | memset(info, 0, sizeof(*info)); |
2476 | info->vsi = &iwdev->vsi; |
2477 | info->manage = mtype; |
2478 | info->entry_type = etype; |
2479 | if (cminfo->vlan_id < VLAN_N_VID) { |
2480 | info->vlan_valid = true; |
2481 | info->vlan_id = cminfo->vlan_id; |
2482 | } else { |
2483 | info->vlan_valid = false; |
2484 | } |
2485 | info->ipv4_valid = cminfo->ipv4; |
2486 | info->user_pri = cminfo->user_pri; |
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
2488 | info->qp_num = cminfo->qh_qpid; |
2489 | info->dest_port = cminfo->loc_port; |
2490 | info->dest_ip[0] = cminfo->loc_addr[0]; |
2491 | info->dest_ip[1] = cminfo->loc_addr[1]; |
2492 | info->dest_ip[2] = cminfo->loc_addr[2]; |
2493 | info->dest_ip[3] = cminfo->loc_addr[3]; |
2494 | if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED || |
2495 | etype == IRDMA_QHASH_TYPE_UDP_UNICAST || |
2496 | etype == IRDMA_QHASH_TYPE_UDP_MCAST || |
2497 | etype == IRDMA_QHASH_TYPE_ROCE_MCAST || |
2498 | etype == IRDMA_QHASH_TYPE_ROCEV2_HW) { |
2499 | info->src_port = cminfo->rem_port; |
2500 | info->src_ip[0] = cminfo->rem_addr[0]; |
2501 | info->src_ip[1] = cminfo->rem_addr[1]; |
2502 | info->src_ip[2] = cminfo->rem_addr[2]; |
2503 | info->src_ip[3] = cminfo->rem_addr[3]; |
2504 | } |
2505 | if (cmnode) { |
2506 | cqp_request->callback_fcn = irdma_send_syn_cqp_callback; |
2507 | cqp_request->param = cmnode; |
2508 | if (!wait) |
			refcount_inc(&cm_node->refcnt);
2510 | } |
	if (info->ipv4_valid)
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id, cmnode);
	else
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id, cmnode);
2527 | |
2528 | cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; |
2529 | cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request; |
2530 | cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY; |
2531 | cqp_info->post_sq = 1; |
	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2533 | if (status && cm_node && !wait) |
2534 | irdma_rem_ref_cm_node(cm_node); |
2535 | |
	irdma_put_cqp_request(iwcqp, cqp_request);
2537 | |
2538 | return status; |
2539 | } |
2540 | |
2541 | /** |
2542 | * irdma_hw_flush_wqes_callback - Check return code after flush |
2543 | * @cqp_request: qhash cqp completion |
2544 | */ |
2545 | static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request) |
2546 | { |
2547 | struct irdma_qp_flush_info *hw_info; |
2548 | struct irdma_sc_qp *qp; |
2549 | struct irdma_qp *iwqp; |
2550 | struct cqp_cmds_info *cqp_info; |
2551 | |
2552 | cqp_info = &cqp_request->info; |
2553 | hw_info = &cqp_info->in.u.qp_flush_wqes.info; |
2554 | qp = cqp_info->in.u.qp_flush_wqes.qp; |
2555 | iwqp = qp->qp_uk.back_qp; |
2556 | |
2557 | if (cqp_request->compl_info.maj_err_code) |
2558 | return; |
2559 | |
2560 | if (hw_info->rq && |
2561 | (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || |
2562 | cqp_request->compl_info.min_err_code == 0)) { |
2563 | /* RQ WQE flush was requested but did not happen */ |
2564 | qp->qp_uk.rq_flush_complete = true; |
2565 | } |
2566 | if (hw_info->sq && |
2567 | (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED || |
2568 | cqp_request->compl_info.min_err_code == 0)) { |
2569 | if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) { |
			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
				  qp->qp_uk.qp_id);
			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2573 | } |
2574 | qp->qp_uk.sq_flush_complete = true; |
2575 | } |
2576 | } |
2577 | |
2578 | /** |
2579 | * irdma_hw_flush_wqes - flush qp's wqe |
2580 | * @rf: RDMA PCI function |
2581 | * @qp: hardware control qp |
2582 | * @info: info for flush |
2583 | * @wait: flag wait for completion |
2584 | */ |
2585 | int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, |
2586 | struct irdma_qp_flush_info *info, bool wait) |
2587 | { |
2588 | int status; |
2589 | struct irdma_qp_flush_info *hw_info; |
2590 | struct irdma_cqp_request *cqp_request; |
2591 | struct cqp_cmds_info *cqp_info; |
2592 | struct irdma_qp *iwqp = qp->qp_uk.back_qp; |
2593 | |
	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2595 | if (!cqp_request) |
2596 | return -ENOMEM; |
2597 | |
2598 | cqp_info = &cqp_request->info; |
2599 | if (!wait) |
2600 | cqp_request->callback_fcn = irdma_hw_flush_wqes_callback; |
2601 | hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; |
2602 | memcpy(hw_info, info, sizeof(*hw_info)); |
2603 | cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES; |
2604 | cqp_info->post_sq = 1; |
2605 | cqp_info->in.u.qp_flush_wqes.qp = qp; |
2606 | cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; |
2607 | status = irdma_handle_cqp_op(rf, cqp_request); |
2608 | if (status) { |
2609 | qp->qp_uk.sq_flush_complete = true; |
2610 | qp->qp_uk.rq_flush_complete = true; |
		irdma_put_cqp_request(&rf->cqp, cqp_request);
2612 | return status; |
2613 | } |
2614 | |
2615 | if (!wait || cqp_request->compl_info.maj_err_code) |
2616 | goto put_cqp; |
2617 | |
2618 | if (info->rq) { |
2619 | if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || |
2620 | cqp_request->compl_info.min_err_code == 0) { |
2621 | /* RQ WQE flush was requested but did not happen */ |
2622 | qp->qp_uk.rq_flush_complete = true; |
2623 | } |
2624 | } |
2625 | if (info->sq) { |
2626 | if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED || |
2627 | cqp_request->compl_info.min_err_code == 0) { |
2628 | /* |
2629 | * Handling case where WQE is posted to empty SQ when |
2630 | * flush has not completed |
2631 | */ |
2632 | if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) { |
2633 | struct irdma_cqp_request *new_req; |
2634 | |
2635 | if (!qp->qp_uk.sq_flush_complete) |
2636 | goto put_cqp; |
2637 | qp->qp_uk.sq_flush_complete = false; |
2638 | qp->flush_sq = false; |
2639 | |
2640 | info->rq = false; |
2641 | info->sq = true; |
				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2643 | if (!new_req) { |
2644 | status = -ENOMEM; |
2645 | goto put_cqp; |
2646 | } |
2647 | cqp_info = &new_req->info; |
2648 | hw_info = &new_req->info.in.u.qp_flush_wqes.info; |
2649 | memcpy(hw_info, info, sizeof(*hw_info)); |
2650 | cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES; |
2651 | cqp_info->post_sq = 1; |
2652 | cqp_info->in.u.qp_flush_wqes.qp = qp; |
2653 | cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req; |
2654 | |
				status = irdma_handle_cqp_op(rf, new_req);
				if (new_req->compl_info.maj_err_code ||
				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
				    status) {
					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
						  iwqp->ibqp.qp_num);
					qp->qp_uk.sq_flush_complete = false;
					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
				}
				irdma_put_cqp_request(&rf->cqp, new_req);
2665 | } else { |
2666 | /* SQ WQE flush was requested but did not happen */ |
2667 | qp->qp_uk.sq_flush_complete = true; |
2668 | } |
2669 | } else { |
2670 | if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) |
2671 | qp->qp_uk.sq_flush_complete = true; |
2672 | } |
2673 | } |
2674 | |
2675 | ibdev_dbg(&rf->iwdev->ibdev, |
2676 | "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n" , |
2677 | iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, |
2678 | iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state, |
2679 | cqp_request->compl_info.maj_err_code, |
2680 | cqp_request->compl_info.min_err_code); |
2681 | put_cqp: |
	irdma_put_cqp_request(&rf->cqp, cqp_request);
2683 | |
2684 | return status; |
2685 | } |
2686 | |
2687 | /** |
2688 | * irdma_gen_ae - generate AE |
2689 | * @rf: RDMA PCI function |
2690 | * @qp: qp associated with AE |
2691 | * @info: info for ae |
2692 | * @wait: wait for completion |
2693 | */ |
2694 | void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, |
2695 | struct irdma_gen_ae_info *info, bool wait) |
2696 | { |
2697 | struct irdma_gen_ae_info *ae_info; |
2698 | struct irdma_cqp_request *cqp_request; |
2699 | struct cqp_cmds_info *cqp_info; |
2700 | |
	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2702 | if (!cqp_request) |
2703 | return; |
2704 | |
2705 | cqp_info = &cqp_request->info; |
2706 | ae_info = &cqp_request->info.in.u.gen_ae.info; |
2707 | memcpy(ae_info, info, sizeof(*ae_info)); |
2708 | cqp_info->cqp_cmd = IRDMA_OP_GEN_AE; |
2709 | cqp_info->post_sq = 1; |
2710 | cqp_info->in.u.gen_ae.qp = qp; |
2711 | cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request; |
2712 | |
	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
2715 | } |
2716 | |
2717 | void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask) |
2718 | { |
2719 | struct irdma_qp_flush_info info = {}; |
2720 | struct irdma_pci_f *rf = iwqp->iwdev->rf; |
2721 | u8 flush_code = iwqp->sc_qp.flush_code; |
2722 | |
2723 | if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ)) |
2724 | return; |
2725 | |
	/* Set flush info fields */
2727 | info.sq = flush_mask & IRDMA_FLUSH_SQ; |
2728 | info.rq = flush_mask & IRDMA_FLUSH_RQ; |
2729 | |
2730 | /* Generate userflush errors in CQE */ |
2731 | info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR; |
2732 | info.sq_minor_code = FLUSH_GENERAL_ERR; |
2733 | info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR; |
2734 | info.rq_minor_code = FLUSH_GENERAL_ERR; |
2735 | info.userflushcode = true; |
2736 | |
2737 | if (flush_mask & IRDMA_REFLUSH) { |
2738 | if (info.sq) |
2739 | iwqp->sc_qp.flush_sq = false; |
2740 | if (info.rq) |
2741 | iwqp->sc_qp.flush_rq = false; |
2742 | } else { |
2743 | if (flush_code) { |
2744 | if (info.sq && iwqp->sc_qp.sq_flush_code) |
2745 | info.sq_minor_code = flush_code; |
2746 | if (info.rq && iwqp->sc_qp.rq_flush_code) |
2747 | info.rq_minor_code = flush_code; |
2748 | } |
2749 | if (!iwqp->user_mode) |
			queue_delayed_work(iwqp->iwdev->cleanup_wq,
					   &iwqp->dwork_flush,
					   msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2753 | } |
2754 | |
2755 | /* Issue flush */ |
	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
				  flush_mask & IRDMA_FLUSH_WAIT);
2758 | iwqp->flush_issued = true; |
2759 | } |
2760 | |