1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (C) 2020 Marvell. */
3
4#include "otx2_cpt_common.h"
5#include "otx2_cptvf.h"
6#include "otx2_cptlf.h"
7#include "otx2_cptvf_algs.h"
8#include "cn10k_cpt.h"
9#include <rvu_reg.h>
10
11#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
12
/*
 * Enable the PF->VF mailbox interrupt for this VF.
 *
 * Any stale pending interrupt is acknowledged first so that enabling
 * the interrupt does not immediately fire for an old event.
 */
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);

	/* Enable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}
23
/*
 * Disable the PF->VF mailbox interrupt for this VF.
 *
 * The enable bit is cleared first (W1C register), then any interrupt
 * that was already latched is acknowledged.
 */
static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Disable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);
}
34
/*
 * Enable MSI-X for the VF, register the PF<=>VF mailbox interrupt
 * handler and verify the PF is responsive via the "ready" message.
 *
 * Return: 0 on success, -EPROBE_DEFER when the PF does not answer the
 * mailbox (so probing can be retried later), negative errno otherwise.
 * The mailbox IRQ is requested with devm_request_irq(), so the handler
 * is released automatically if the probe ultimately fails.
 */
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
	int ret, irq;
	int num_vec;

	num_vec = pci_msix_vec_count(cptvf->pdev);
	if (num_vec <= 0)
		return -EINVAL;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cptvf->pdev->dev,
			"Request for %d msix vectors failed\n", num_vec);
		return ret;
	}
	irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
	/* Register VF<=>PF mailbox interrupt handler */
	ret = devm_request_irq(&cptvf->pdev->dev, irq,
			       otx2_cptvf_pfvf_mbox_intr, 0,
			       "CPTPFVF Mbox", cptvf);
	if (ret)
		return ret;
	/* Enable PF-VF mailbox interrupts */
	cptvf_enable_pfvf_mbox_intrs(cptvf);

	ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
	if (ret) {
		dev_warn(&cptvf->pdev->dev,
			 "PF not responding to mailbox, deferring probe\n");
		/* Mask the mbox interrupt again before deferring */
		cptvf_disable_pfvf_mbox_intrs(cptvf);
		return -EPROBE_DEFER;
	}
	return 0;
}
71
/*
 * Set up the PF<=>VF mailbox: an ordered workqueue for deferred message
 * processing, the mailbox memory region (BAR2 register space on cn10k,
 * a dedicated mailbox BAR otherwise), the mbox state itself and its
 * bounce buffer (bbuf).
 *
 * Return: 0 on success, negative errno on failure; everything set up so
 * far is unwound on the error paths.
 */
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;
	int ret;

	cptvf->pfvf_mbox_wq =
		alloc_ordered_workqueue("cpt_pfvf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptvf->pfvf_mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		cptvf->pfvf_mbox_base = cptvf->reg_base +
					CN10K_CPT_VF_MBOX_REGION;
	} else {
		offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
		size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
		/* Map PF-VF mailbox memory */
		cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
							size);
		if (!cptvf->pfvf_mbox_base) {
			dev_err(&pdev->dev, "Unable to map BAR4\n");
			ret = -ENOMEM;
			goto free_wqe;
		}
	}

	ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
			     pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
	if (ret)
		goto free_wqe;

	ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
	if (ret)
		goto destroy_mbox;

	INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
	return 0;

destroy_mbox:
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	return ret;
}
121
/*
 * Tear down the PF<=>VF mailbox: flush and destroy the workqueue first
 * so no mailbox work can run against a destroyed mbox.
 */
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
}
127
/*
 * Tasklet body: post-process completed CPT instructions for the LF
 * whose wqe was packed into @data at tasklet_init() time.
 */
static void cptlf_work_handler(unsigned long data)
{
	struct otx2_cptlf_wqe *wqe = (struct otx2_cptlf_wqe *)data;

	otx2_cpt_post_process(wqe);
}
132
133static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
134{
135 int i;
136
137 for (i = 0; i < lfs->lfs_num; i++) {
138 if (!lfs->lf[i].wqe)
139 continue;
140
141 tasklet_kill(t: &lfs->lf[i].wqe->work);
142 kfree(objp: lfs->lf[i].wqe);
143 lfs->lf[i].wqe = NULL;
144 }
145}
146
147static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
148{
149 struct otx2_cptlf_wqe *wqe;
150 int i, ret = 0;
151
152 for (i = 0; i < lfs->lfs_num; i++) {
153 wqe = kzalloc(size: sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
154 if (!wqe) {
155 ret = -ENOMEM;
156 goto cleanup_tasklet;
157 }
158
159 tasklet_init(t: &wqe->work, func: cptlf_work_handler, data: (u64) wqe);
160 wqe->lfs = lfs;
161 wqe->lf_num = i;
162 lfs->lf[i].wqe = wqe;
163 }
164 return 0;
165
166cleanup_tasklet:
167 cleanup_tasklet_work(lfs);
168 return ret;
169}
170
/*
 * Free the pending-queue entry arrays of all LFs.  Safe on partially
 * allocated state: kfree(NULL) is a no-op and heads are reset to NULL,
 * so this may be called from alloc_pending_queues()'s error path.
 */
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		kfree(lfs->lf[i].pqueue.head);
		lfs->lf[i].pqueue.head = NULL;
	}
}
180
181static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
182{
183 int size, ret, i;
184
185 if (!lfs->lfs_num)
186 return -EINVAL;
187
188 for (i = 0; i < lfs->lfs_num; i++) {
189 lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
190 size = lfs->lf[i].pqueue.qlen *
191 sizeof(struct otx2_cpt_pending_entry);
192
193 lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
194 if (!lfs->lf[i].pqueue.head) {
195 ret = -ENOMEM;
196 goto error;
197 }
198
199 /* Initialize spin lock */
200 spin_lock_init(&lfs->lf[i].pqueue.lock);
201 }
202 return 0;
203
204error:
205 free_pending_queues(lfs);
206 return ret;
207}
208
/* Undo lf_sw_init(): release tasklet work structures and pending queues. */
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
	cleanup_tasklet_work(lfs);
	free_pending_queues(lfs);
}
214
215static int lf_sw_init(struct otx2_cptlfs_info *lfs)
216{
217 int ret;
218
219 ret = alloc_pending_queues(lfs);
220 if (ret) {
221 dev_err(&lfs->pdev->dev,
222 "Allocating pending queues failed\n");
223 return ret;
224 }
225 ret = init_tasklet_work(lfs);
226 if (ret) {
227 dev_err(&lfs->pdev->dev,
228 "Tasklet work init failed\n");
229 goto pending_queues_free;
230 }
231 return 0;
232
233pending_queues_free:
234 free_pending_queues(lfs);
235 return ret;
236}
237
/*
 * Full LF teardown for device removal: quiesce the hardware, unregister
 * the crypto algorithms and interrupts, release software state and
 * detach the LFs from the PF.  The sequence mirrors cptvf_lf_init()
 * in reverse and its order is significant.
 */
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
	/* Mark LFs as being reset so in-flight users can bail out */
	atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);

	/* Remove interrupts affinity */
	otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queue */
	otx2_cptlf_disable_iqueues(lfs);
	/* Unregister crypto algorithms */
	otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
	/* Unregister LFs interrupts */
	otx2_cptlf_unregister_misc_interrupts(lfs);
	otx2_cptlf_unregister_done_interrupts(lfs);
	/* Cleanup LFs software side */
	lf_sw_cleanup(lfs);
	/* Free instruction queues */
	otx2_cpt_free_instruction_queues(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
	lfs->lfs_num = 0;
}
259
/*
 * Attach and bring up the VF's CPT LFs: query the kernel-crypto engine
 * group and the per-VF LF limit from the PF over the mailbox, attach
 * the LFs, set up their software state and interrupts, then register
 * the crypto algorithms.
 *
 * Return: 0 on success, negative errno on failure; each error path
 * unwinds exactly the steps completed before it.
 */
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	struct device *dev = &cptvf->pdev->dev;
	int ret, lfs_num;
	u8 eng_grp_msk;

	/* Get engine group number for symmetric crypto */
	cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
	if (ret)
		return ret;

	/* The mbox handler fills in kcrypto_eng_grp_num on success */
	if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev, "Engine group for kernel crypto not available\n");
		ret = -ENOENT;
		return ret;
	}
	eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

	ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
	if (ret)
		return ret;

	lfs_num = cptvf->lfs.kvf_limits;

	otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
				&cptvf->pfvf_mbox, cptvf->blkaddr);
	ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
			      lfs_num);
	if (ret)
		return ret;

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Initialize LFs software side */
	ret = lf_sw_init(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register LFs interrupts */
	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	ret = otx2_cptlf_register_done_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	/* Set interrupts affinity */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_intr;

	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	/* Register crypto algorithms */
	ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
	if (ret) {
		dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
		goto disable_irqs;
	}
	return 0;

disable_irqs:
	otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
	otx2_cptlf_unregister_misc_interrupts(lfs);
	otx2_cptlf_unregister_done_interrupts(lfs);
cleanup_lf_sw:
	lf_sw_cleanup(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);

	return ret;
}
338
/*
 * PCI probe callback: enable the device, map its config register BAR,
 * set up the LMTST region (cn10k), the PF<=>VF mailbox and interrupts,
 * query engine capabilities and finally bring up the CPT LFs.
 *
 * PCI resources use pcim_/devm_ managed APIs, so only the mailbox and
 * its interrupts need explicit unwinding here.
 */
static int otx2_cptvf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptvf_dev *cptvf;
	int ret;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map VF's configuration registers */
	ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPTVF_DRV_NAME);
	if (ret) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);

	/*
	 * NOTE(review): the error paths below do not explicitly release
	 * what cn10k_cptvf_lmtst_init() allocated — confirm it is
	 * devres-managed.
	 */
	ret = cn10k_cptvf_lmtst_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Initialize PF<=>VF mailbox */
	ret = cptvf_pfvf_mbox_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Register interrupts */
	ret = cptvf_register_interrupts(cptvf);
	if (ret)
		goto destroy_pfvf_mbox;

	cptvf->blkaddr = BLKADDR_CPT0;

	cptvf_hw_ops_get(cptvf);

	ret = otx2_cptvf_send_caps_msg(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
		goto unregister_interrupts;
	}
	/*
	 * NOTE(review): bit 35 of the SE engine caps appears to select
	 * the cn10k SGv2 scatter-gather format — confirm against the
	 * hardware capability definitions.
	 */
	if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
		cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;

	/* Initialize CPT LFs */
	ret = cptvf_lf_init(cptvf);
	if (ret)
		goto unregister_interrupts;

	return 0;

unregister_interrupts:
	cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
	cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
418
/*
 * PCI remove callback: tear down LFs, mailbox interrupts and the
 * mailbox itself in the reverse order of otx2_cptvf_probe().
 */
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT VF device.\n");
		return;
	}
	cptvf_lf_shutdown(&cptvf->lfs);
	/* Disable PF-VF mailbox interrupt */
	cptvf_disable_pfvf_mbox_intrs(cptvf);
	/* Destroy PF-VF mbox */
	cptvf_pfvf_mbox_destroy(cptvf);
	pci_set_drvdata(pdev, NULL);
}
434
/* Supported devices: OcteonTX2 (CN9xxx) and CN10K CPT virtual functions */
static const struct pci_device_id otx2_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
	{PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, } /* end of table */
};
441
/* PCI driver binding for the CPT VF devices listed above */
static struct pci_driver otx2_cptvf_pci_driver = {
	.name = OTX2_CPTVF_DRV_NAME,
	.id_table = otx2_cptvf_id_table,
	.probe = otx2_cptvf_probe,
	.remove = otx2_cptvf_remove,
};
448
/* Standard module init/exit boilerplate generated from the pci_driver */
module_pci_driver(otx2_cptvf_pci_driver);

MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
457
/* source: linux/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c */