/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the above
 * conditions are met.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

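/* Walk every ingress queue owned by a ULD: the ordinary offload rx queues
 * occupy slots [0, nrxq) and the concentrator (CIQ) queues occupy
 * slots [nrxq, nrxq + nciq).
 */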
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

/* Flush the aggregated LRO sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

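/* Allocate the SGE response queues (and free lists) for one ULD: the
 * ordinary rx queues first, then the concentrator queues, spread evenly
 * across the adapter's ports. Under MSI-X each queue is given its own
 * vector from the driver's bitmap; on failure everything allocated so
 * far is unwound.
 */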
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int i, err, msi_idx, que_idx = 0;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
			if (msi_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}

			snprintf(adap->msix_info[msi_idx].desc,
				 sizeof(adap->msix_info[msi_idx].desc),
				 "%s-%s%d",
				 adap->port[0]->name, rxq_info->name, i);

			q->msix = &adap->msix_info[msi_idx];
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;

		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
		if (q->msix)
			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
	}
	return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret;

	ret = alloc_uld_rxqs(adap, rxq_info, lro);
	if (ret)
		return ret;

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

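/* Tear down a ULD's rx queues. For RDMA, control-queue completions are
 * first pointed back at queue 0 (cmplqid = 0) before the queues are
 * freed, undoing the routing set up in setup_sge_queues_uld().
 */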
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}

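/* Size and allocate a ULD's ingress queue bookkeeping. Queue counts are
 * rounded to a multiple of the port count so every port receives an
 * equal share, and are capped by the per-ULD MSI-X budget (nqs_per_uld)
 * or the number of online CPUs.
 */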
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

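/* Request one MSI-X interrupt per ULD rx queue and set each vector's CPU
 * affinity; if any request fails, release the vectors and affinity masks
 * acquired so far.
 */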
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;
	int err = 0;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

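/* Set up a ULD's tx queues. Offload tx queues are shared between ULDs of
 * the same tx type, so an existing set is reused and reference-counted
 * rather than allocated again; crypto queue counts are rounded down to a
 * multiple of the port count, everything else is rounded up.
 */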
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

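/* Allocate the per-adapter arrays that track registered ULDs and their
 * rx/tx queue state; called once while the adapter is brought up.
 */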
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	if (!is_uld(adap))
		return;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

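/* Fill the cxgb4_lld_info block handed to a ULD at attach time with a
 * snapshot of adapter capabilities, SGE parameters and the register
 * addresses the ULD needs for doorbells and interrupts.
 */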
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

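/* Hand a ULD its lld info and queue ids through its ->add() callback and,
 * if the adapter is already fully initialized, immediately signal
 * CXGB4_STATE_UP through ->state_change().
 */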
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
	const struct tid_info *t = &adap->tids;

	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}

/**
 * cxgb4_set_ktls_feature - request FW to enable/disable ktls settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable ktls settings.
 */
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
	int ret = 0;
	u32 params =
		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
		FW_PARAMS_PARAM_Y_V(enable) |
		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

	if (enable) {
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
			/* If any ULD connections are up at this point, some
			 * other ULD is already active; return failure.
			 */
			if (cxgb4_uld_in_use(adap)) {
				dev_dbg(adap->pdev_dev,
					"ULD connections (tid/stid) active. Can't enable kTLS\n");
				return -EINVAL;
			}
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
		} else {
			/* ktls settings are already up, just bump the refcount. */
			refcount_inc(&adap->chcr_ktls.ktls_refcount);
		}
	} else {
		/* Return failure if the refcount is already 0. */
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
			return -EINVAL;
		/* Decrement the refcount; if it reaches 0, disable the ktls
		 * feature, otherwise just report success.
		 */
		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
		}
	}

	return ret;
}
#endif

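/* Bring up everything a ULD needs on one adapter: queue configuration,
 * SGE rx queues, MSI-X interrupts, tx queues, then the ULD attach itself.
 * Each failure path unwinds exactly the steps completed before it.
 */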
static void cxgb4_uld_alloc_resources(struct adapter *adap,
				      enum cxgb4_uld type,
				      const struct cxgb4_uld_info *p)
{
	int ret = 0;

	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
		return;
	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
		return;
	ret = cfg_queues_uld(adap, type, p);
	if (ret)
		goto out;
	ret = setup_sge_queues_uld(adap, type, p->lro);
	if (ret)
		goto free_queues;
	if (adap->flags & CXGB4_USING_MSIX) {
		ret = request_msix_queue_irqs_uld(adap, type);
		if (ret)
			goto free_rxq;
	}
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		enable_rx_uld(adap, type);
	if (adap->uld[type].add)
		goto free_irq;
	ret = setup_sge_txq_uld(adap, type, p);
	if (ret)
		goto free_irq;
	adap->uld[type] = *p;
	ret = uld_attach(adap, type);
	if (ret)
		goto free_txq;
	return;
free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & CXGB4_USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	dev_warn(adap->pdev_dev,
		 "ULD registration failed for uld type %d\n", type);
}

void cxgb4_uld_enable(struct adapter *adap)
{
	struct cxgb4_uld_list *uld_entry;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	list_for_each_entry(uld_entry, &uld_list, list_node)
		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
					  &uld_entry->uld_info);
	mutex_unlock(&uld_mutex);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct cxgb4_uld_list *uld_entry;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
	if (!uld_entry)
		return;

	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		cxgb4_uld_alloc_resources(adap, type, p);

	uld_entry->uld_type = type;
	list_add_tail(&uld_entry->list_node, &uld_list);
	mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);
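
/* A minimal registration sketch from a ULD's point of view. The my_uld_*
 * callbacks and the field values are illustrative assumptions, not part
 * of this driver; only the cxgb4_uld_info fields and the register /
 * unregister entry points are real:
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.nrxq = 8,
 *		.ntxq = 8,
 *		.rxq_size = 1024,
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */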

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct cxgb4_uld_list *uld_entry, *tmp;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}

	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
		if (uld_entry->uld_type == type) {
			list_del(&uld_entry->list_node);
			kfree(uld_entry);
		}
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);