1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Marvell RVU Admin Function driver |
3 | * |
4 | * Copyright (C) 2018 Marvell. |
5 | * |
6 | */ |
7 | |
8 | #ifndef RVU_H |
9 | #define RVU_H |
10 | |
11 | #include <linux/pci.h> |
12 | #include <net/devlink.h> |
13 | |
14 | #include "rvu_struct.h" |
15 | #include "rvu_devlink.h" |
16 | #include "common.h" |
17 | #include "mbox.h" |
18 | #include "npc.h" |
19 | #include "rvu_reg.h" |
20 | #include "ptp.h" |
21 | |
22 | /* PCI device IDs */ |
23 | #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 |
24 | #define PCI_DEVID_OCTEONTX2_LBK 0xA061 |
25 | |
26 | /* Subsystem Device ID */ |
27 | #define PCI_SUBSYS_DEVID_98XX 0xB100 |
28 | #define PCI_SUBSYS_DEVID_96XX 0xB200 |
29 | #define PCI_SUBSYS_DEVID_CN10K_A 0xB900 |
30 | #define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00 |
31 | #define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 |
32 | #define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 |
33 | |
34 | /* PCI BAR nos */ |
35 | #define PCI_AF_REG_BAR_NUM 0 |
36 | #define PCI_PF_REG_BAR_NUM 2 |
37 | #define PCI_MBOX_BAR_NUM 4 |
38 | |
39 | #define NAME_SIZE 32 |
40 | #define MAX_NIX_BLKS 2 |
41 | #define MAX_CPT_BLKS 2 |
42 | |
43 | /* PF_FUNC */ |
44 | #define RVU_PFVF_PF_SHIFT 10 |
45 | #define RVU_PFVF_PF_MASK 0x3F |
46 | #define RVU_PFVF_FUNC_SHIFT 0 |
47 | #define RVU_PFVF_FUNC_MASK 0x3FF |
48 | |
49 | #ifdef CONFIG_DEBUG_FS |
50 | struct dump_ctx { |
51 | int lf; |
52 | int id; |
53 | bool all; |
54 | }; |
55 | |
56 | struct cpt_ctx { |
57 | int blkaddr; |
58 | struct rvu *rvu; |
59 | }; |
60 | |
61 | struct rvu_debugfs { |
62 | struct dentry *root; |
63 | struct dentry *cgx_root; |
64 | struct dentry *cgx; |
65 | struct dentry *lmac; |
66 | struct dentry *npa; |
67 | struct dentry *nix; |
68 | struct dentry *npc; |
69 | struct dentry *cpt; |
70 | struct dentry *mcs_root; |
71 | struct dentry *mcs; |
72 | struct dentry *mcs_rx; |
73 | struct dentry *mcs_tx; |
74 | struct dump_ctx npa_aura_ctx; |
75 | struct dump_ctx npa_pool_ctx; |
76 | struct dump_ctx nix_cq_ctx; |
77 | struct dump_ctx nix_rq_ctx; |
78 | struct dump_ctx nix_sq_ctx; |
79 | struct cpt_ctx cpt_ctx[MAX_CPT_BLKS]; |
80 | int npa_qsize_id; |
81 | int nix_qsize_id; |
82 | }; |
83 | #endif |
84 | |
/* Deferred-work item used to service mailbox messages; queued on the
 * mailbox workqueues in struct mbox_wq_info.
 */
struct rvu_work {
	struct work_struct work;
	struct rvu *rvu;
	int num_msgs;     /* pending down-direction mbox messages (see mbox_wrk) */
	int up_num_msgs;  /* pending up-direction mbox messages (see mbox_wrk_up) */
};
91 | |
92 | struct rsrc_bmap { |
93 | unsigned long *bmap; /* Pointer to resource bitmap */ |
94 | u16 max; /* Max resource id or count */ |
95 | }; |
96 | |
97 | struct rvu_block { |
98 | struct rsrc_bmap lf; |
99 | struct admin_queue *aq; /* NIX/NPA AQ */ |
100 | u16 *fn_map; /* LF to pcifunc mapping */ |
101 | bool multislot; |
102 | bool implemented; |
103 | u8 addr; /* RVU_BLOCK_ADDR_E */ |
104 | u8 type; /* RVU_BLOCK_TYPE_E */ |
105 | u8 lfshift; |
106 | u64 lookup_reg; |
107 | u64 pf_lfcnt_reg; |
108 | u64 vf_lfcnt_reg; |
109 | u64 lfcfg_reg; |
110 | u64 msixcfg_reg; |
111 | u64 lfreset_reg; |
112 | unsigned char name[NAME_SIZE]; |
113 | struct rvu *rvu; |
114 | u64 cpt_flt_eng_map[3]; |
115 | u64 cpt_rcvrd_eng_map[3]; |
116 | }; |
117 | |
118 | struct nix_mcast { |
119 | struct qmem *mce_ctx; |
120 | struct qmem *mcast_buf; |
121 | int replay_pkind; |
122 | struct rsrc_bmap mce_counter[2]; |
123 | /* Counters for both ingress and egress mcast lists */ |
124 | struct mutex mce_lock; /* Serialize MCE updates */ |
125 | }; |
126 | |
/* List of multicast/mirror entries (MCEs) forming one replication list */
struct nix_mce_list {
	struct hlist_head head;
	int count; /* current number of entries */
	int max;   /* capacity limit for this list */
};
132 | |
/* One multicast group: its MCE list plus where the group is anchored
 * in the MCAM and MCE table.
 */
struct nix_mcast_grp_elem {
	struct nix_mce_list mcast_mce_list;
	u32 mcast_grp_idx;    /* group identifier (see rvu_nix_mcast_get_mce_index) */
	u32 pcifunc;          /* owning PF/VF */
	int mcam_index;
	int mce_start_index;
	struct list_head list; /* node in nix_mcast_grp::mcast_grp_head */
	u8 dir;               /* direction — presumably ingress/egress; confirm against users */
};
142 | |
/* Registry of all multicast groups on a NIX block */
struct nix_mcast_grp {
	struct list_head mcast_grp_head; /* list of nix_mcast_grp_elem */
	int count;                       /* number of groups on the list */
	int next_grp_index;              /* next group index to hand out */
	struct mutex mcast_grp_lock; /* Serialize MCE updates */
};
149 | |
150 | /* layer metadata to uniquely identify a packet header field */ |
151 | struct npc_layer_mdata { |
152 | u8 lid; |
153 | u8 ltype; |
154 | u8 hdr; |
155 | u8 key; |
156 | u8 len; |
157 | }; |
158 | |
159 | /* Structure to represent a field present in the |
160 | * generated key. A key field may present anywhere and can |
161 | * be of any size in the generated key. Once this structure |
162 | * is populated for fields of interest then field's presence |
163 | * and location (if present) can be known. |
164 | */ |
165 | struct npc_key_field { |
166 | /* Masks where all set bits indicate position |
167 | * of a field in the key |
168 | */ |
169 | u64 kw_mask[NPC_MAX_KWS_IN_KEY]; |
170 | /* Number of words in the key a field spans. If a field is |
171 | * of 16 bytes and key offset is 4 then the field will use |
172 | * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and |
173 | * nr_kws will be 3(KW0, KW1 and KW2). |
174 | */ |
175 | int nr_kws; |
176 | /* used by packet header fields */ |
177 | struct npc_layer_mdata layer_mdata; |
178 | }; |
179 | |
180 | struct npc_mcam { |
181 | struct rsrc_bmap counters; |
182 | struct mutex lock; /* MCAM entries and counters update lock */ |
183 | unsigned long *bmap; /* bitmap, 0 => bmap_entries */ |
184 | unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */ |
185 | u16 bmap_entries; /* Number of unreserved MCAM entries */ |
186 | u16 bmap_fcnt; /* MCAM entries free count */ |
187 | u16 *entry2pfvf_map; |
188 | u16 *entry2cntr_map; |
189 | u16 *cntr2pfvf_map; |
190 | u16 *cntr_refcnt; |
191 | u16 *entry2target_pffunc; |
192 | u8 keysize; /* MCAM keysize 112/224/448 bits */ |
193 | u8 banks; /* Number of MCAM banks */ |
194 | u8 banks_per_entry;/* Number of keywords in key */ |
195 | u16 banksize; /* Number of MCAM entries in each bank */ |
196 | u16 total_entries; /* Total number of MCAM entries */ |
197 | u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */ |
198 | u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */ |
199 | u16 lprio_count; |
200 | u16 lprio_start; |
201 | u16 hprio_count; |
202 | u16 hprio_end; |
203 | u16 rx_miss_act_cntr; /* Counter for RX MISS action */ |
204 | /* fields present in the generated key */ |
205 | struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX]; |
206 | struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX]; |
207 | u64 tx_features; |
208 | u64 rx_features; |
209 | struct list_head mcam_rules; |
210 | }; |
211 | |
212 | /* Structure for per RVU func info ie PF/VF */ |
213 | struct rvu_pfvf { |
214 | bool npalf; /* Only one NPALF per RVU_FUNC */ |
215 | bool nixlf; /* Only one NIXLF per RVU_FUNC */ |
216 | u16 sso; |
217 | u16 ssow; |
218 | u16 cptlfs; |
219 | u16 timlfs; |
220 | u16 cpt1_lfs; |
221 | u8 cgx_lmac; |
222 | |
223 | /* Block LF's MSIX vector info */ |
224 | struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */ |
225 | #define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF)) |
226 | u16 *msix_lfmap; /* Vector to block LF mapping */ |
227 | |
228 | /* NPA contexts */ |
229 | struct qmem *aura_ctx; |
230 | struct qmem *pool_ctx; |
231 | struct qmem *npa_qints_ctx; |
232 | unsigned long *aura_bmap; |
233 | unsigned long *pool_bmap; |
234 | |
235 | /* NIX contexts */ |
236 | struct qmem *rq_ctx; |
237 | struct qmem *sq_ctx; |
238 | struct qmem *cq_ctx; |
239 | struct qmem *; |
240 | struct qmem *cq_ints_ctx; |
241 | struct qmem *nix_qints_ctx; |
242 | unsigned long *sq_bmap; |
243 | unsigned long *rq_bmap; |
244 | unsigned long *cq_bmap; |
245 | |
246 | u16 rx_chan_base; |
247 | u16 tx_chan_base; |
248 | u8 rx_chan_cnt; /* total number of RX channels */ |
249 | u8 tx_chan_cnt; /* total number of TX channels */ |
250 | u16 maxlen; |
251 | u16 minlen; |
252 | |
253 | bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */ |
254 | u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ |
255 | u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ |
256 | |
257 | /* Broadcast/Multicast/Promisc pkt replication info */ |
258 | u16 bcast_mce_idx; |
259 | u16 mcast_mce_idx; |
260 | u16 promisc_mce_idx; |
261 | struct nix_mce_list bcast_mce_list; |
262 | struct nix_mce_list mcast_mce_list; |
263 | struct nix_mce_list promisc_mce_list; |
264 | bool use_mce_list; |
265 | |
266 | struct rvu_npc_mcam_rule *def_ucast_rule; |
267 | |
268 | bool cgx_in_use; /* this PF/VF using CGX? */ |
269 | int cgx_users; /* number of cgx users - used only by PFs */ |
270 | |
271 | int intf_mode; |
272 | u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ |
273 | u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ |
274 | u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ |
275 | u8 lbkid; /* NIX0/1 lbk link ID */ |
276 | u64 lmt_base_addr; /* Preseving the pcifunc's lmtst base addr*/ |
277 | u64 lmt_map_ent_w1; /* Preseving the word1 of lmtst map table entry*/ |
278 | unsigned long flags; |
279 | struct sdp_node_info *sdp_info; |
280 | }; |
281 | |
282 | enum rvu_pfvf_flags { |
283 | NIXLF_INITIALIZED = 0, |
284 | PF_SET_VF_MAC, |
285 | PF_SET_VF_CFG, |
286 | PF_SET_VF_TRUSTED, |
287 | }; |
288 | |
289 | #define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC) |
290 | |
291 | struct nix_bp { |
292 | struct rsrc_bmap bpids; /* free bpids bitmap */ |
293 | u16 cgx_bpid_cnt; |
294 | u16 sdp_bpid_cnt; |
295 | u16 free_pool_base; |
296 | u16 *fn_map; /* pcifunc mapping */ |
297 | u8 *intf_map; /* interface type map */ |
298 | u8 *ref_cnt; |
299 | }; |
300 | |
301 | struct nix_txsch { |
302 | struct rsrc_bmap schq; |
303 | u8 lvl; |
304 | #define NIX_TXSCHQ_FREE BIT_ULL(1) |
305 | #define NIX_TXSCHQ_CFG_DONE BIT_ULL(0) |
306 | #define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF) |
307 | #define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16) |
308 | #define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16)) |
309 | #define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16)) |
310 | u32 *pfvf_map; |
311 | }; |
312 | |
313 | struct nix_mark_format { |
314 | u8 total; |
315 | u8 in_use; |
316 | u32 *cfg; |
317 | }; |
318 | |
319 | /* smq(flush) to tl1 cir/pir info */ |
320 | struct nix_smq_tree_ctx { |
321 | u64 cir_off; |
322 | u64 cir_val; |
323 | u64 pir_off; |
324 | u64 pir_val; |
325 | }; |
326 | |
327 | /* smq flush context */ |
328 | struct nix_smq_flush_ctx { |
329 | int smq; |
330 | u16 tl1_schq; |
331 | u16 tl2_schq; |
332 | struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT]; |
333 | }; |
334 | |
335 | struct npc_pkind { |
336 | struct rsrc_bmap rsrc; |
337 | u32 *pfchan_map; |
338 | }; |
339 | |
340 | struct nix_flowkey { |
341 | #define NIX_FLOW_KEY_ALG_MAX 32 |
342 | u32 flowkey[NIX_FLOW_KEY_ALG_MAX]; |
343 | int in_use; |
344 | }; |
345 | |
346 | struct nix_lso { |
347 | u8 total; |
348 | u8 in_use; |
349 | }; |
350 | |
351 | struct nix_txvlan { |
352 | #define NIX_TX_VTAG_DEF_MAX 0x400 |
353 | struct rsrc_bmap rsrc; |
354 | u16 *entry2pfvf_map; |
355 | struct mutex rsrc_lock; /* Serialize resource alloc/free */ |
356 | }; |
357 | |
358 | struct nix_ipolicer { |
359 | struct rsrc_bmap band_prof; |
360 | u16 *pfvf_map; |
361 | u16 *match_id; |
362 | u16 *ref_count; |
363 | }; |
364 | |
365 | struct nix_hw { |
366 | int blkaddr; |
367 | struct rvu *rvu; |
368 | struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ |
369 | struct nix_mcast mcast; |
370 | struct nix_mcast_grp mcast_grp; |
371 | struct nix_flowkey flowkey; |
372 | struct nix_mark_format mark_format; |
373 | struct nix_lso lso; |
374 | struct nix_txvlan txvlan; |
375 | struct nix_ipolicer *ipolicer; |
376 | struct nix_bp bp; |
377 | u64 *tx_credits; |
378 | u8 cc_mcs_cnt; |
379 | }; |
380 | |
381 | /* RVU block's capabilities or functionality, |
382 | * which vary by silicon version/skew. |
383 | */ |
384 | struct hw_cap { |
385 | /* Transmit side supported functionality */ |
386 | u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */ |
387 | u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */ |
388 | u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */ |
389 | u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */ |
390 | bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ |
391 | bool nix_shaping; /* Is shaping and coloring supported */ |
392 | bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */ |
393 | bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ |
394 | bool nix_rx_multicast; /* Rx packet replication support */ |
395 | bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */ |
396 | bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ |
397 | bool programmable_chans; /* Channels programmable ? */ |
398 | bool ipolicer; |
399 | bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */ |
400 | bool ; /* Hash extract enabled ? */ |
401 | bool npc_exact_match_enabled; /* Exact match supported ? */ |
402 | }; |
403 | |
404 | struct rvu_hwinfo { |
405 | u8 total_pfs; /* MAX RVU PFs HW supports */ |
406 | u16 total_vfs; /* Max RVU VFs HW supports */ |
407 | u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ |
408 | u8 cgx; |
409 | u8 lmac_per_cgx; |
410 | u16 cgx_chan_base; /* CGX base channel number */ |
411 | u16 lbk_chan_base; /* LBK base channel number */ |
412 | u16 sdp_chan_base; /* SDP base channel number */ |
413 | u16 cpt_chan_base; /* CPT base channel number */ |
414 | u8 cgx_links; |
415 | u8 lbk_links; |
416 | u8 sdp_links; |
417 | u8 cpt_links; /* Number of CPT links */ |
418 | u8 npc_kpus; /* No of parser units */ |
419 | u8 npc_pkinds; /* No of port kinds */ |
420 | u8 npc_intfs; /* No of interfaces */ |
421 | u8 npc_kpu_entries; /* No of KPU entries */ |
422 | u16 npc_counters; /* No of match stats counters */ |
423 | u32 lbk_bufsize; /* FIFO size supported by LBK */ |
424 | bool npc_ext_set; /* Extended register set */ |
425 | u64 npc_stat_ena; /* Match stats enable bit */ |
426 | |
427 | struct hw_cap cap; |
428 | struct rvu_block block[BLK_COUNT]; /* Block info */ |
429 | struct nix_hw *nix; |
430 | struct rvu *rvu; |
431 | struct npc_pkind pkind; |
432 | struct npc_mcam mcam; |
433 | struct npc_exact_table *table; |
434 | }; |
435 | |
/* Per-direction mailbox state: the request ("down") mailbox, the
 * notification ("up") mailbox, their work items, and the workqueue
 * that services both.
 */
struct mbox_wq_info {
	struct otx2_mbox mbox;           /* down-direction mailbox */
	struct rvu_work *mbox_wrk;

	struct otx2_mbox mbox_up;        /* up-direction mailbox */
	struct rvu_work *mbox_wrk_up;

	struct workqueue_struct *mbox_wq;
};
445 | |
/* SDP channel info handed over by firmware; 'reserved' pads the struct
 * so the enclosing rvu_fwdata layout stays fixed.
 */
struct channel_fwdata {
	struct sdp_node_info info;
	u8 valid; /* non-zero presumably means 'info' is populated — confirm with FW spec */
#define RVU_CHANL_INFO_RESERVED 379
	u8 reserved[RVU_CHANL_INFO_RESERVED];
};
452 | |
453 | struct rvu_fwdata { |
454 | #define 0xCFDA /* Custom Firmware Data*/ |
455 | #define RVU_FWDATA_VERSION 0x0001 |
456 | u32 ; |
457 | u32 version; /* version id */ |
458 | |
459 | /* MAC address */ |
460 | #define PF_MACNUM_MAX 32 |
461 | #define VF_MACNUM_MAX 256 |
462 | u64 pf_macs[PF_MACNUM_MAX]; |
463 | u64 vf_macs[VF_MACNUM_MAX]; |
464 | u64 sclk; |
465 | u64 rclk; |
466 | u64 mcam_addr; |
467 | u64 mcam_sz; |
468 | u64 msixtr_base; |
469 | u32 ptp_ext_clk_rate; |
470 | u32 ptp_ext_tstamp; |
471 | struct channel_fwdata channel_data; |
472 | #define FWDATA_RESERVED_MEM 958 |
473 | u64 reserved[FWDATA_RESERVED_MEM]; |
474 | #define CGX_MAX 9 |
475 | #define CGX_LMACS_MAX 4 |
476 | #define CGX_LMACS_USX 8 |
477 | #define FWDATA_CGX_LMAC_OFFSET 10536 |
478 | union { |
479 | struct cgx_lmac_fwdata_s |
480 | cgx_fw_data[CGX_MAX][CGX_LMACS_MAX]; |
481 | struct cgx_lmac_fwdata_s |
482 | cgx_fw_data_usx[CGX_MAX][CGX_LMACS_USX]; |
483 | }; |
484 | /* Do not add new fields below this line */ |
485 | }; |
486 | |
487 | struct ptp; |
488 | |
489 | /* KPU profile adapter structure gathering all KPU configuration data and abstracting out the |
490 | * source where it came from. |
491 | */ |
492 | struct npc_kpu_profile_adapter { |
493 | const char *name; |
494 | u64 version; |
495 | const struct npc_lt_def_cfg *lt_def; |
496 | const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */ |
497 | const struct npc_kpu_profile *kpu; /* array[kpus] */ |
498 | struct npc_mcam_kex *mkex; |
499 | struct npc_mcam_kex_hash *mkex_hash; |
500 | bool custom; |
501 | size_t pkinds; |
502 | size_t kpus; |
503 | }; |
504 | |
505 | #define RVU_SWITCH_LBK_CHAN 63 |
506 | |
/* State for the RVU switch implementation over NPC with DMAC rules */
struct rvu_switch {
	struct mutex switch_lock; /* Serialize flow installation */
	u32 used_entries;
	u16 *entry2pcifunc; /* MCAM entry index to owning PF_FUNC */
	u16 mode;
	u16 start_entry;
};
514 | |
515 | struct rvu { |
516 | void __iomem *afreg_base; |
517 | void __iomem *pfreg_base; |
518 | struct pci_dev *pdev; |
519 | struct device *dev; |
520 | struct rvu_hwinfo *hw; |
521 | struct rvu_pfvf *pf; |
522 | struct rvu_pfvf *hwvf; |
523 | struct mutex rsrc_lock; /* Serialize resource alloc/free */ |
524 | struct mutex alias_lock; /* Serialize bar2 alias access */ |
525 | int vfs; /* Number of VFs attached to RVU */ |
526 | u16 vf_devid; /* VF devices id */ |
527 | int nix_blkaddr[MAX_NIX_BLKS]; |
528 | |
529 | /* Mbox */ |
530 | struct mbox_wq_info afpf_wq_info; |
531 | struct mbox_wq_info afvf_wq_info; |
532 | |
533 | /* PF FLR */ |
534 | struct rvu_work *flr_wrk; |
535 | struct workqueue_struct *flr_wq; |
536 | struct mutex flr_lock; /* Serialize FLRs */ |
537 | |
538 | /* MSI-X */ |
539 | u16 num_vec; |
540 | char *irq_name; |
541 | bool *irq_allocated; |
542 | dma_addr_t msix_base_iova; |
543 | u64 msixtr_base_phy; /* Register reset value */ |
544 | |
545 | /* CGX */ |
546 | #define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ |
547 | u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */ |
548 | u8 cgx_mapped_pfs; |
549 | u8 cgx_cnt_max; /* CGX port count max */ |
550 | u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ |
551 | u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for |
552 | * every cgx lmac port |
553 | */ |
554 | unsigned long pf_notify_bmap; /* Flags for PF notification */ |
555 | void **cgx_idmap; /* cgx id to cgx data map table */ |
556 | struct work_struct cgx_evh_work; |
557 | struct workqueue_struct *cgx_evh_wq; |
558 | spinlock_t cgx_evq_lock; /* cgx event queue lock */ |
559 | struct list_head cgx_evq_head; /* cgx event queue head */ |
560 | struct mutex cgx_cfg_lock; /* serialize cgx configuration */ |
561 | |
562 | char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */ |
563 | char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */ |
564 | |
565 | /* Firmware data */ |
566 | struct rvu_fwdata *fwdata; |
567 | void *kpu_fwdata; |
568 | size_t kpu_fwdata_sz; |
569 | void __iomem *kpu_prfl_addr; |
570 | |
571 | /* NPC KPU data */ |
572 | struct npc_kpu_profile_adapter kpu; |
573 | |
574 | struct ptp *ptp; |
575 | |
576 | int mcs_blk_cnt; |
577 | int cpt_pf_num; |
578 | |
579 | #ifdef CONFIG_DEBUG_FS |
580 | struct rvu_debugfs rvu_dbg; |
581 | #endif |
582 | struct rvu_devlink *rvu_dl; |
583 | |
584 | /* RVU switch implementation over NPC with DMAC rules */ |
585 | struct rvu_switch rswitch; |
586 | |
587 | struct work_struct mcs_intr_work; |
588 | struct workqueue_struct *mcs_intr_wq; |
589 | struct list_head mcs_intrq_head; |
590 | /* mcs interrupt queue lock */ |
591 | spinlock_t mcs_intrq_lock; |
592 | /* CPT interrupt lock */ |
593 | spinlock_t cpt_intr_lock; |
594 | |
595 | struct mutex mbox_lock; /* Serialize mbox up and down msgs */ |
596 | }; |
597 | |
598 | static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) |
599 | { |
600 | writeq(val, addr: rvu->afreg_base + ((block << 28) | offset)); |
601 | } |
602 | |
603 | static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset) |
604 | { |
605 | return readq(addr: rvu->afreg_base + ((block << 28) | offset)); |
606 | } |
607 | |
608 | static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val) |
609 | { |
610 | writeq(val, addr: rvu->pfreg_base + offset); |
611 | } |
612 | |
613 | static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) |
614 | { |
615 | return readq(addr: rvu->pfreg_base + offset); |
616 | } |
617 | |
/* Write the BAR2 select register and flush the posted write; callers
 * depend on the selection being visible before they touch LF registers.
 */
static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
	/* HW requires read back of RVU_AF_BAR2_SEL register to make sure completion of
	 * write operation.
	 */
	rvu_write64(rvu, block, offset, val);
	rvu_read64(rvu, block, offset);
	/* Barrier to ensure read completes before accessing LF registers */
	mb();
}
628 | |
629 | /* Silicon revisions */ |
630 | static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu) |
631 | { |
632 | struct pci_dev *pdev = rvu->pdev; |
633 | /* 96XX A0/B0, 95XX A0/A1/B0 chips */ |
634 | return ((pdev->revision == 0x00) || (pdev->revision == 0x01) || |
635 | (pdev->revision == 0x10) || (pdev->revision == 0x11) || |
636 | (pdev->revision == 0x14)); |
637 | } |
638 | |
639 | static inline bool is_rvu_96xx_A0(struct rvu *rvu) |
640 | { |
641 | struct pci_dev *pdev = rvu->pdev; |
642 | |
643 | return (pdev->revision == 0x00); |
644 | } |
645 | |
646 | static inline bool is_rvu_96xx_B0(struct rvu *rvu) |
647 | { |
648 | struct pci_dev *pdev = rvu->pdev; |
649 | |
650 | return (pdev->revision == 0x00) || (pdev->revision == 0x01); |
651 | } |
652 | |
653 | static inline bool is_rvu_95xx_A0(struct rvu *rvu) |
654 | { |
655 | struct pci_dev *pdev = rvu->pdev; |
656 | |
657 | return (pdev->revision == 0x10) || (pdev->revision == 0x11); |
658 | } |
659 | |
660 | /* REVID for PCIe devices. |
661 | * Bits 0..1: minor pass, bit 3..2: major pass |
662 | * bits 7..4: midr id |
663 | */ |
664 | #define PCI_REVISION_ID_96XX 0x00 |
665 | #define PCI_REVISION_ID_95XX 0x10 |
666 | #define PCI_REVISION_ID_95XXN 0x20 |
667 | #define PCI_REVISION_ID_98XX 0x30 |
668 | #define PCI_REVISION_ID_95XXMM 0x40 |
669 | #define PCI_REVISION_ID_95XXO 0xE0 |
670 | |
671 | static inline bool is_rvu_otx2(struct rvu *rvu) |
672 | { |
673 | struct pci_dev *pdev = rvu->pdev; |
674 | |
675 | u8 midr = pdev->revision & 0xF0; |
676 | |
677 | return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || |
678 | midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX || |
679 | midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); |
680 | } |
681 | |
682 | static inline bool is_cnf10ka_a0(struct rvu *rvu) |
683 | { |
684 | struct pci_dev *pdev = rvu->pdev; |
685 | |
686 | if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A && |
687 | (pdev->revision & 0x0F) == 0x0) |
688 | return true; |
689 | return false; |
690 | } |
691 | |
692 | static inline bool (struct rvu *rvu) |
693 | { |
694 | u64 npc_const3; |
695 | |
696 | npc_const3 = rvu_read64(rvu, block: BLKADDR_NPC, NPC_AF_CONST3); |
697 | if (!(npc_const3 & BIT_ULL(62))) |
698 | return false; |
699 | |
700 | return true; |
701 | } |
702 | |
703 | static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, |
704 | u8 lmacid, u8 chan) |
705 | { |
706 | u64 nix_const = rvu_read64(rvu, block: BLKADDR_NIX0, NIX_AF_CONST); |
707 | u16 cgx_chans = nix_const & 0xFFULL; |
708 | struct rvu_hwinfo *hw = rvu->hw; |
709 | |
710 | if (!hw->cap.programmable_chans) |
711 | return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan); |
712 | |
713 | return rvu->hw->cgx_chan_base + |
714 | (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan; |
715 | } |
716 | |
717 | static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid, |
718 | u8 chan) |
719 | { |
720 | u64 nix_const = rvu_read64(rvu, block: BLKADDR_NIX0, NIX_AF_CONST); |
721 | u16 lbk_chans = (nix_const >> 16) & 0xFFULL; |
722 | struct rvu_hwinfo *hw = rvu->hw; |
723 | |
724 | if (!hw->cap.programmable_chans) |
725 | return NIX_CHAN_LBK_CHX(lbkid, chan); |
726 | |
727 | return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan; |
728 | } |
729 | |
730 | static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan) |
731 | { |
732 | struct rvu_hwinfo *hw = rvu->hw; |
733 | |
734 | if (!hw->cap.programmable_chans) |
735 | return NIX_CHAN_SDP_CHX(chan); |
736 | |
737 | return hw->sdp_chan_base + chan; |
738 | } |
739 | |
740 | static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) |
741 | { |
742 | return rvu->hw->cpt_chan_base + chan; |
743 | } |
744 | |
745 | static inline bool is_rvu_supports_nix1(struct rvu *rvu) |
746 | { |
747 | struct pci_dev *pdev = rvu->pdev; |
748 | |
749 | if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX) |
750 | return true; |
751 | |
752 | return false; |
753 | } |
754 | |
755 | /* Function Prototypes |
756 | * RVU |
757 | */ |
758 | #define RVU_LBK_VF_DEVID 0xA0F8 |
759 | static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc) |
760 | { |
761 | return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) && |
762 | (rvu->vf_devid == RVU_LBK_VF_DEVID)); |
763 | } |
764 | |
765 | static inline bool is_vf(u16 pcifunc) |
766 | { |
767 | return !!(pcifunc & RVU_PFVF_FUNC_MASK); |
768 | } |
769 | |
770 | /* check if PF_FUNC is AF */ |
771 | static inline bool is_pffunc_af(u16 pcifunc) |
772 | { |
773 | return !pcifunc; |
774 | } |
775 | |
776 | static inline bool is_rvu_fwdata_valid(struct rvu *rvu) |
777 | { |
778 | return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) && |
779 | (rvu->fwdata->version == RVU_FWDATA_VERSION); |
780 | } |
781 | |
782 | int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); |
783 | void rvu_free_bitmap(struct rsrc_bmap *rsrc); |
784 | int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); |
785 | void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); |
786 | bool is_rsrc_free(struct rsrc_bmap *rsrc, int id); |
787 | int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); |
788 | int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); |
789 | void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start); |
790 | bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); |
791 | u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); |
792 | int rvu_get_pf(u16 pcifunc); |
793 | struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); |
794 | void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); |
795 | bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); |
796 | bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype); |
797 | int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); |
798 | int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); |
799 | int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); |
800 | int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); |
801 | int rvu_get_num_lbk_chans(void); |
802 | int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, |
803 | u16 global_slot, u16 *slot_in_block); |
804 | |
805 | /* RVU HW reg validation */ |
806 | enum regmap_block { |
807 | TXSCHQ_HWREGMAP = 0, |
808 | MAX_HWREGMAP, |
809 | }; |
810 | |
811 | bool rvu_check_valid_reg(int regmap, int regblk, u64 reg); |
812 | |
813 | /* NPA/NIX AQ APIs */ |
814 | int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, |
815 | int qsize, int inst_size, int res_size); |
816 | void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); |
817 | |
818 | /* SDP APIs */ |
819 | int rvu_sdp_init(struct rvu *rvu); |
820 | bool is_sdp_pfvf(u16 pcifunc); |
821 | bool is_sdp_pf(u16 pcifunc); |
822 | bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); |
823 | |
824 | /* CGX APIs */ |
825 | static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) |
826 | { |
827 | return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && |
828 | !is_sdp_pf(pcifunc: pf << RVU_PFVF_PF_SHIFT); |
829 | } |
830 | |
831 | static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) |
832 | { |
833 | *cgx_id = (map >> 4) & 0xF; |
834 | *lmac_id = (map & 0xF); |
835 | } |
836 | |
837 | static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc) |
838 | { |
839 | return ((pcifunc & RVU_PFVF_FUNC_MASK) && |
840 | is_pf_cgxmapped(rvu, pf: rvu_get_pf(pcifunc))); |
841 | } |
842 | |
843 | #define M(_name, _id, fn_name, req, rsp) \ |
844 | int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); |
845 | MBOX_MESSAGES |
846 | #undef M |
847 | |
848 | int rvu_cgx_init(struct rvu *rvu); |
849 | int rvu_cgx_exit(struct rvu *rvu); |
850 | void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); |
851 | int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start); |
852 | void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); |
853 | int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); |
854 | int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, |
855 | int rxtxflag, u64 *stat); |
856 | void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc); |
857 | |
858 | /* NPA APIs */ |
859 | int rvu_npa_init(struct rvu *rvu); |
860 | void rvu_npa_freemem(struct rvu *rvu); |
861 | void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf); |
862 | int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, |
863 | struct npa_aq_enq_rsp *rsp); |
864 | |
865 | /* NIX APIs */ |
866 | bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc); |
867 | int rvu_nix_init(struct rvu *rvu); |
868 | int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, |
869 | int blkaddr, u32 cfg); |
870 | void rvu_nix_freemem(struct rvu *rvu); |
871 | int rvu_get_nixlf_count(struct rvu *rvu); |
872 | void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf); |
873 | int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr); |
874 | int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, |
875 | struct nix_mce_list *mce_list, |
876 | int mce_idx, int mcam_index, bool add); |
877 | void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, |
878 | struct nix_mce_list **mce_list, int *mce_idx); |
879 | struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr); |
880 | int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr); |
881 | void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc); |
882 | int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, |
883 | struct nix_hw **nix_hw, int *blkaddr); |
884 | int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, |
885 | u16 rq_idx, u16 match_id); |
886 | int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, |
887 | struct nix_cn10k_aq_enq_req *aq_req, |
888 | struct nix_cn10k_aq_enq_rsp *aq_rsp, |
889 | u16 pcifunc, u8 ctype, u32 qidx); |
890 | int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); |
891 | int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type); |
892 | u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu); |
893 | u32 convert_bytes_to_dwrr_mtu(u32 bytes); |
894 | void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, |
895 | struct nix_txsch *txsch, bool enable); |
896 | void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc); |
897 | int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, |
898 | u32 mcast_grp_idx); |
899 | int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, |
900 | u32 mcast_grp_idx, u16 mcam_index); |
901 | void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); |
902 | |
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
/* Install/enable the per-NIXLF default MCAM match entries:
 * unicast (DMAC match), promiscuous, broadcast and all-multicast.
 */
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, u8 chan_cnt);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				  bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan);
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				    u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				   bool enable);

/* Bulk enable/disable and teardown of a PF/VF's MCAM entries. */
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index);

/* MCAM entry/counter allocation bookkeeping (reported via *alloc_cnt /
 * *enable_cnt out-parameters).
 */
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
				       int blkaddr, int *alloc_cnt,
				       int *enable_cnt);
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
					 int blkaddr, int *alloc_cnt,
					 int *enable_cnt);
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
const char *npc_get_field_name(u8 hdr);
int npc_get_bank(struct npc_mcam *mcam, int index);
void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
/* Low-level MCAM entry accessors keyed by (blkaddr, index). */
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index, bool enable);
u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			int blkaddr, int index);
void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, int index, u64 cfg);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, u16 src, struct mcam_entry *entry,
			 u8 *intf, u8 *ena);
/* NOTE(review): the following CGX/MAC helpers sit under the "NPC APIs"
 * section comment but are CGX-specific by name — consider regrouping
 * them under a dedicated CGX section (verify against the .c file layout
 * before moving).
 */
bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
/* Flow-control (pause frame / PFC) configuration per PF/VF. */
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
			       u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
			     int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
			   int index);
int rvu_npc_init(struct rvu *rvu);
/* Install an MCAM drop rule matching on channel / exact-match / bcast-mcast
 * value+mask pairs; the allocated counter index is returned via @counter_idx.
 */
int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
			       u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
			       u64 bcast_mcast_val, u64 bcast_mcast_mask);
void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr);
void npc_mcam_rsrcs_deinit(struct rvu *rvu);
981 | |
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
void rvu_cpt_unregister_interrupts(struct rvu *rvu);
/* Tear down a CPT LF attached at @slot — NOTE(review): presumably invoked
 * from the FLR/detach path; confirm against callers.
 */
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
			int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
int rvu_cpt_init(struct rvu *rvu);

/* NDC register bit-field masks: cache bank select (bits 7:0) and
 * bank line select (bits 31:16).
 */
#define NDC_AF_BANK_MASK		GENMASK_ULL(7, 0)
#define NDC_AF_BANK_LINE_MASK		GENMASK_ULL(31, 16)
992 | |
/* CN10K RVU — silicon-specific channel base programming. */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);

/* CN10K NIX — block-level init hook for CN10K-only NIX features. */
void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);

/* CN10K RVU - LMT (Large atomic sTore) mapping table. */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
void rvu_apr_block_cn10k_init(struct rvu *rvu);
1003 | |
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
#else
/* No-op stubs so callers need no #ifdef when debugfs is compiled out. */
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
1011 | |
int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);

/* RVU Switch — inter-PF/VF switching rule management. */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);

/* Configure the NPC parser mode for @pcifunc, including variable-length
 * offset extraction (@var_len_off/@var_len_off_mask/@shift_dir).
 */
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
			   u64 pkind, u8 var_len_off, u8 var_len_off_mask,
			   u8 shift_dir);
/* NOTE(review): @pcifunc is 'int' here but 'u16' in nearly every other
 * prototype in this header — confirm the definition in the .c file before
 * unifying the type.
 */
int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

/* CN10K MCS (MACsec) block APIs. */
int rvu_mcs_init(struct rvu *rvu);
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
1029 | |
1030 | #endif /* RVU_H */ |
1031 | |