/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */
#ifndef __OTX2_CPTLF_H
#define __OTX2_CPTLF_H

#include <linux/soc/marvell/octeontx2/asm.h>
#include <linux/bitfield.h>
#include <mbox.h>
#include <rvu.h>
#include "otx2_cpt_common.h"
#include "otx2_cpt_reqmgr.h"

/*
 * CPT instruction and pending queues user requested length in CPT_INST_S msgs
 */
#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200

/*
 * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
 * messages.
 */
#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS/40)

/*
 * CPT instruction and pending queues length in CPT_INST_S messages
 */
#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
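
/*
 * Worked example with the default of 8200 requested messages:
 * OTX2_CPT_SIZE_DIV40 = 8200/40 = 205 and
 * OTX2_CPT_INST_QLEN_MSGS = (205 - 1) * 40 = 8160, i.e. one 40-message unit
 * is held back from the length exposed to software.
 */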

/*
 * LDWB is getting incorrectly used when IQB_LDWB = 1 and CPT instruction
 * queue has less than 320 free entries. So, increase HW instruction queue
 * size by 320 and give 320 entries less for SW/NIX RX as a workaround.
 */
#define OTX2_CPT_INST_QLEN_EXTRA_BYTES  (320 * OTX2_CPT_INST_SIZE)
#define OTX2_CPT_EXTRA_SIZE_DIV40       (320/40)
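
/*
 * With the values above this adds 320/40 = 8 SIZE_DIV40 units to the queue
 * size programmed into hardware (see otx2_cptlf_do_set_iqueue_size() below),
 * while OTX2_CPT_INST_QLEN_MSGS keeps the software-visible length unchanged,
 * so the extra 320 entries are never handed out to submitters.
 */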

/* CPT instruction queue length in bytes */
#define OTX2_CPT_INST_QLEN_BYTES \
                ((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \
                OTX2_CPT_INST_QLEN_EXTRA_BYTES)

/* CPT instruction group queue length in bytes */
#define OTX2_CPT_INST_GRP_QLEN_BYTES \
                ((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)

/* CPT FC length in bytes */
#define OTX2_CPT_Q_FC_LEN 128

/* CPT instruction queue alignment */
#define OTX2_CPT_INST_Q_ALIGNMENT 128

/* Mask which selects all engine groups */
#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF

/* Maximum LFs supported in OcteonTX2 for CPT */
#define OTX2_CPT_MAX_LFS_NUM 64

/* Queue priority */
#define OTX2_CPT_QUEUE_HI_PRIO 0x1
#define OTX2_CPT_QUEUE_LOW_PRIO 0x0

enum otx2_cptlf_state {
        OTX2_CPTLF_IN_RESET,
        OTX2_CPTLF_STARTED,
};

struct otx2_cpt_inst_queue {
        u8 *vaddr;                /* 128-byte aligned queue pointer used by SW */
        u8 *real_vaddr;           /* Unaligned base from dma_alloc_coherent() */
        dma_addr_t dma_addr;      /* DMA address matching vaddr */
        dma_addr_t real_dma_addr; /* DMA address matching real_vaddr */
        u32 size;                 /* Total allocation size in bytes */
};

struct otx2_cptlfs_info;
struct otx2_cptlf_wqe {
        struct tasklet_struct work;
        struct otx2_cptlfs_info *lfs;
        u8 lf_num;
};

struct otx2_cptlf_info {
        struct otx2_cptlfs_info *lfs;           /* Ptr to cptlfs_info struct */
        void __iomem *lmtline;                  /* Address of LMTLINE */
        void __iomem *ioreg;                    /* LMTLINE send register */
        int msix_offset;                        /* MSI-X interrupts offset */
        cpumask_var_t affinity_mask;            /* IRQs affinity mask */
        u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32];/* Interrupts name */
        u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS];  /* Is interrupt registered */
        u8 slot;                                /* Slot number of this LF */

        struct otx2_cpt_inst_queue iqueue;      /* Instruction queue */
        struct otx2_cpt_pending_queue pqueue;   /* Pending queue */
        struct otx2_cptlf_wqe *wqe;             /* Tasklet work info */
};

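/*
 * Silicon-specific hooks. On OcteonTX2, send_cmd is expected to resolve to
 * otx2_cpt_send_cmd() below (the LMTST-based submit path); newer silicon
 * presumably installs its own implementations of these callbacks.
 */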
struct cpt_hw_ops {
        void (*send_cmd)(union otx2_cpt_inst_s *cptinst, u32 insts_num,
                         struct otx2_cptlf_info *lf);
        u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
        u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
        struct otx2_cpt_inst_info *
        (*cpt_sg_info_create)(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
                              gfp_t gfp);
};

struct otx2_cptlfs_info {
        /* Registers start address of VF/PF LFs are attached to */
        void __iomem *reg_base;
#define LMTLINE_SIZE 128
        void __iomem *lmt_base;
        struct pci_dev *pdev;           /* Device LFs are attached to */
        struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
        struct otx2_mbox *mbox;
        struct cpt_hw_ops *ops;
        u8 are_lfs_attached;            /* Whether CPT LFs are attached */
        u8 lfs_num;                     /* Number of CPT LFs */
        u8 kcrypto_eng_grp_num;         /* Kernel crypto engine group number */
        u8 kvf_limits;                  /* Kernel crypto limits */
        atomic_t state;                 /* LFs state: started/reset */
        int blkaddr;                    /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
        int global_slot;                /* Global slot across the blocks */
        u8 ctx_ilen;                    /* CPT context ILEN */
        u8 ctx_ilen_ovrd;               /* Is the default context ILEN overridden */
};

static inline void otx2_cpt_free_instruction_queues(
                                        struct otx2_cptlfs_info *lfs)
{
        struct otx2_cpt_inst_queue *iq;
        int i;

        for (i = 0; i < lfs->lfs_num; i++) {
                iq = &lfs->lf[i].iqueue;
                if (iq->real_vaddr)
                        dma_free_coherent(&lfs->pdev->dev,
                                          iq->size,
                                          iq->real_vaddr,
                                          iq->real_dma_addr);
                iq->real_vaddr = NULL;
                iq->vaddr = NULL;
        }
}

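/*
 * Each LF gets a single DMA-coherent buffer: the instruction group queue
 * comes first, followed by the 128-byte aligned instruction queue proper,
 * with OTX2_CPT_Q_FC_LEN bytes budgeted beyond it (presumably for the
 * flow-control region) and OTX2_CPT_INST_Q_ALIGNMENT spare bytes to absorb
 * the PTR_ALIGN() adjustment below.
 */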
static inline int otx2_cpt_alloc_instruction_queues(
                                        struct otx2_cptlfs_info *lfs)
{
        struct otx2_cpt_inst_queue *iq;
        int ret = 0, i;

        if (!lfs->lfs_num)
                return -EINVAL;

        for (i = 0; i < lfs->lfs_num; i++) {
                iq = &lfs->lf[i].iqueue;
                iq->size = OTX2_CPT_INST_QLEN_BYTES +
                           OTX2_CPT_Q_FC_LEN +
                           OTX2_CPT_INST_GRP_QLEN_BYTES +
                           OTX2_CPT_INST_Q_ALIGNMENT;
                iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
                                                    &iq->real_dma_addr, GFP_KERNEL);
                if (!iq->real_vaddr) {
                        ret = -ENOMEM;
                        goto error;
                }
                iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
                iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;

                /* Align pointers */
                iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
                iq->dma_addr = PTR_ALIGN(iq->dma_addr,
                                         OTX2_CPT_INST_Q_ALIGNMENT);
        }
        return 0;

error:
        otx2_cpt_free_instruction_queues(lfs);
        return ret;
}

static inline void otx2_cptlf_set_iqueues_base_addr(
                                        struct otx2_cptlfs_info *lfs)
{
        union otx2_cptx_lf_q_base lf_q_base;
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
                                 OTX2_CPT_LF_Q_BASE, lf_q_base.u);
        }
}

static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
{
        union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };

        lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
                                 OTX2_CPT_EXTRA_SIZE_DIV40;
        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                         OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}

static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
}

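/*
 * Bit fields used below: INFLIGHT, GRB_CNT and GWB_CNT decode
 * OTX2_CPT_LF_INPROG, while XQ_XOR, DQPTR and NQPTR decode
 * OTX2_CPT_LF_Q_INST_PTR.
 */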
#define INFLIGHT   GENMASK_ULL(8, 0)
#define GRB_CNT    GENMASK_ULL(39, 32)
#define GWB_CNT    GENMASK_ULL(47, 40)
#define XQ_XOR     GENMASK_ULL(63, 63)
#define DQPTR      GENMASK_ULL(19, 0)
#define NQPTR      GENMASK_ULL(51, 32)

static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
        void __iomem *reg_base = lf->lfs->reg_base;
        struct pci_dev *pdev = lf->lfs->pdev;
        u8 blkaddr = lf->lfs->blkaddr;
        int timeout = 1000000;
        u64 inprog, inst_ptr;
        u64 slot = lf->slot;
        u64 qsize, pending;
        int i = 0;

        /* Disable instructions enqueuing */
        otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, 0x0);

        inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
        inprog |= BIT_ULL(16);
        otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG, inprog);

        /*
         * Wait for the queue to drain: pending is NQPTR - DQPTR with a
         * full-queue wrap correction derived from XQ_XOR.
         */
        qsize = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_SIZE) & 0x7FFF;
        do {
                inst_ptr = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_INST_PTR);
                pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
                          FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr);
                udelay(1);
                timeout--;
        } while ((pending != 0) && (timeout != 0));

        if (timeout == 0)
                dev_warn(&pdev->dev, "TIMEOUT: CPT poll on pending instructions\n");

        timeout = 1000000;
        /* Wait for CPT queue to become execution-quiescent */
        do {
                inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);

                if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
                    (FIELD_GET(GRB_CNT, inprog) == 0)) {
                        i++;
                } else {
                        i = 0;
                        timeout--;
                }
        } while ((timeout != 0) && (i < 10));

        if (timeout == 0)
                dev_warn(&pdev->dev, "TIMEOUT: CPT poll on inflight count\n");
        /* Wait for 2 us to flush all queue writes to memory */
        udelay(2);
}

static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
                otx2_cpt_lf_reset_msg(lfs, lfs->global_slot + slot);
        }
}

static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
                                             bool enable)
{
        u8 blkaddr = lf->lfs->blkaddr;
        union otx2_cptx_lf_ctl lf_ctl;

        lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
                                   OTX2_CPT_LF_CTL);

        /* Set iqueue's enqueuing */
        lf_ctl.s.ena = enable ? 0x1 : 0x0;
        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                         OTX2_CPT_LF_CTL, lf_ctl.u);
}

static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
{
        otx2_cptlf_set_iqueue_enq(lf, true);
}

static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
                                              bool enable)
{
        union otx2_cptx_lf_inprog lf_inprog;
        u8 blkaddr = lf->lfs->blkaddr;

        lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
                                      OTX2_CPT_LF_INPROG);

        /* Set iqueue's execution */
        lf_inprog.s.eena = enable ? 0x1 : 0x0;
        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                         OTX2_CPT_LF_INPROG, lf_inprog.u);
}

static inline void otx2_cptlf_set_ctx_flr_flush(struct otx2_cptlf_info *lf)
{
        u8 blkaddr = lf->lfs->blkaddr;
        u64 val;

        val = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
                              OTX2_CPT_LF_CTX_CTL);
        val |= BIT_ULL(0);

        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                         OTX2_CPT_LF_CTX_CTL, val);
}

static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
{
        otx2_cptlf_set_iqueue_exec(lf, true);
}

static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
{
        otx2_cptlf_set_iqueue_exec(lf, false);
}

static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                /* Enable flush on FLR for Errata */
                if (is_dev_cn10kb(lfs->pdev))
                        otx2_cptlf_set_ctx_flr_flush(&lfs->lf[slot]);

                otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
                otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
        }
}

static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
                                      struct otx2_cpt_iq_command *iq_cmd,
                                      u64 comp_baddr)
{
        cptinst->u[0] = 0x0;
        cptinst->s.doneint = true;
        cptinst->s.res_addr = comp_baddr;
        cptinst->u[2] = 0x0;
        cptinst->u[3] = 0x0;
        cptinst->s.ei0 = iq_cmd->cmd.u;
        cptinst->s.ei1 = iq_cmd->dptr;
        cptinst->s.ei2 = iq_cmd->rptr;
        cptinst->s.ei3 = iq_cmd->cptr.u;
}

/*
 * On the OcteonTX2 platform the insts_num parameter is the count of
 * instructions to be enqueued. The valid values for insts_num are:
 * 1 - 1 CPT instruction will be enqueued during LMTST operation
 * 2 - 2 CPT instructions will be enqueued during LMTST operation
 */
static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
                                     u32 insts_num, struct otx2_cptlf_info *lf)
{
        void __iomem *lmtline = lf->lmtline;
        long ret;

        /*
         * Make sure the memory areas pointed to in CPT_INST_S
         * are flushed before the instruction is sent to CPT.
         */
        dma_wmb();

        do {
                /* Copy CPT command to LMTLINE */
                memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);

                /*
                 * LDEOR initiates an atomic transfer to the I/O device.
                 * The following will cause the LMTST to fail (the LDEOR
                 * returns zero):
                 * - No stores have been performed to the LMTLINE since it was
                 *   last invalidated.
                 * - The bytes which have been stored to the LMTLINE since it
                 *   was last invalidated form a pattern that is non-contiguous,
                 *   does not start at byte 0, or does not end on an 8-byte
                 *   boundary (i.e. comprises a formation of other than 1-16
                 *   8-byte words).
                 *
                 * These rules are designed such that an operating system
                 * context switch or hypervisor guest switch need have no
                 * knowledge of the LMTST operations; the switch code does not
                 * need to store to LMTCANCEL. Also note that as LMTLINE data
                 * cannot be read, there is no information leakage between
                 * processes.
                 */
                ret = otx2_lmt_flush(lf->ioreg);

        } while (!ret);
}
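
/*
 * A minimal submission sketch (an illustration, assuming the caller has
 * already built a struct otx2_cpt_iq_command named iq_cmd, DMA-mapped its
 * buffers and a result structure at result_dma_addr, and started the LF):
 *
 *      union otx2_cpt_inst_s inst;
 *
 *      otx2_cpt_fill_inst(&inst, &iq_cmd, result_dma_addr);
 *      lfs->ops->send_cmd(&inst, 1, &lfs->lf[slot]);
 *
 * On OcteonTX2, send_cmd is expected to resolve to otx2_cpt_send_cmd() above;
 * completion is then reported through the otx2_cpt_res_s at result_dma_addr
 * and the done interrupt requested via doneint.
 */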

static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
{
        return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
}

static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
                                           struct pci_dev *pdev,
                                           void __iomem *reg_base,
                                           struct otx2_mbox *mbox,
                                           int blkaddr)
{
        lfs->pdev = pdev;
        lfs->reg_base = reg_base;
        lfs->mbox = mbox;
        lfs->blkaddr = blkaddr;
}
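
/*
 * Typical bring-up order in the PF/VF drivers (a sketch, not enforced by
 * this header): otx2_cptlf_set_dev_info() to populate the lfs struct, then
 * otx2_cptlf_init() with an engine group mask, queue priority and LF count,
 * followed by registering the misc/done interrupts and setting IRQ affinity;
 * otx2_cptlf_shutdown() is presumably the teardown counterpart.
 */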

int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
                    int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);

#endif /* __OTX2_CPTLF_H */