// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */
#include <linux/module.h>
#include "cptpf.h"

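/*
 * Send a mailbox message from the PF to a VF.
 * @vf: VF to which the message is sent
 */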
static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf,
			       struct cpt_mbox *mbx)
{
	/* Writing mbox(0) causes interrupt */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1),
			mbx->data);
	cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg);
}

/*
 * ACKs VF's mailbox message
 * @vf: VF to which ACK to be sent
 */
static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf,
			      struct cpt_mbox *mbx)
{
	mbx->data = 0ull;
	mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
	cpt_send_msg_to_vf(cpt, vf, mbx);
}

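/*
 * Clear the pending PF mailbox interrupt bit for a VF.
 * @vf: VF whose interrupt bit is cleared (write-1-to-clear)
 */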
static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf)
{
	/* W1C for the VF */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf));
}

/*
 * Configure QLEN/Chunk sizes for VF
 */
static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size)
{
	union cptx_pf_qx_ctl pf_qx_ctl;

	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
	pf_qx_ctl.s.size = size;
	pf_qx_ctl.s.cont_err = true;
	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
}

/*
 * Configure VQ priority
 */
static void cpt_cfg_vq_priority(struct cpt_device *cpt, int vf, u32 pri)
{
	union cptx_pf_qx_ctl pf_qx_ctl;

	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
	pf_qx_ctl.s.pri = pri;
	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
}

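/*
 * Bind a VF's queue to a microcode (engine) group.
 * Returns the engine type of the group (AE_TYPES or SE_TYPES) on
 * success, or a negative error code for an invalid queue or group.
 */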
static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
{
	struct microcode *mcode = cpt->mcode;
	union cptx_pf_qx_ctl pf_qx_ctl;
	struct device *dev = &cpt->pdev->dev;

	if (q >= CPT_MAX_VF_NUM) {
		dev_err(dev, "Queues are more than cores in the group");
		return -EINVAL;
	}
	if (grp >= CPT_MAX_CORE_GROUPS) {
		dev_err(dev, "Request group is more than possible groups");
		return -EINVAL;
	}
	if (grp >= cpt->next_mc_idx) {
		dev_err(dev, "Request group is higher than available functional groups");
		return -EINVAL;
	}
	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q));
	pf_qx_ctl.s.grp = mcode[grp].group;
	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u);
	dev_dbg(dev, "VF %d TYPE %s", q, (mcode[grp].is_ae ? "AE" : "SE"));

	return mcode[grp].is_ae ? AE_TYPES : SE_TYPES;
}

/* Interrupt handler to handle mailbox messages from VFs */
static void cpt_handle_mbox_intr(struct cpt_device *cpt, int vf)
{
	struct cpt_vf_info *vfx = &cpt->vfinfo[vf];
	struct cpt_mbox mbx = {};
	int vftype;
	struct device *dev = &cpt->pdev->dev;
	/*
	 * MBOX[0] contains msg
	 * MBOX[1] contains data
	 */
	mbx.msg = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0));
	mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1));
	dev_dbg(dev, "%s: Mailbox msg 0x%llx from VF%d", __func__, mbx.msg, vf);
	switch (mbx.msg) {
	case CPT_MSG_VF_UP:
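		/*
		 * VF reports it is coming up; hold a module reference
		 * for as long as it stays up.
		 */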
		vfx->state = VF_STATE_UP;
		try_module_get(THIS_MODULE);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case CPT_MSG_READY:
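		/* Reply READY with the VF number in the data field */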
		mbx.msg = CPT_MSG_READY;
		mbx.data = vf;
		cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case CPT_MSG_VF_DOWN:
		/* First msg in VF teardown sequence */
		vfx->state = VF_STATE_DOWN;
		module_put(THIS_MODULE);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case CPT_MSG_QLEN:
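		/* VF supplies its queue length; program it into PF_QX_CTL */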
		vfx->qlen = mbx.data;
		cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case CPT_MSG_QBIND_GRP:
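		/*
		 * Bind the VF's queue to the requested engine group and
		 * return the resulting engine type to the VF.
		 */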
		vftype = cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
		if ((vftype != AE_TYPES) && (vftype != SE_TYPES)) {
			dev_err(dev, "Queue %d binding to group %llu failed",
				vf, mbx.data);
		} else {
			dev_dbg(dev, "Queue %d binding to group %llu successful",
				vf, mbx.data);
			mbx.msg = CPT_MSG_QBIND_GRP;
			mbx.data = vftype;
			cpt_send_msg_to_vf(cpt, vf, &mbx);
		}
		break;
	case CPT_MSG_VQ_PRIORITY:
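		/* VF requests a queue priority; write it into PF_QX_CTL */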
		vfx->priority = mbx.data;
		cpt_cfg_vq_priority(cpt, vf, vfx->priority);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	default:
		dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
			vf, mbx.msg);
		break;
	}
}

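/*
 * PF mailbox interrupt handler: scan the mailbox interrupt register,
 * service every VF with a pending message and clear its interrupt.
 * @mbx: mailbox set that raised the interrupt (used for logging)
 */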
void cpt_mbox_intr_handler(struct cpt_device *cpt, int mbx)
{
	u64 intr;
	u8 vf;

	intr = cpt_read_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0));
	dev_dbg(&cpt->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&cpt->pdev->dev, "Intr from VF %d\n", vf);
			cpt_handle_mbox_intr(cpt, vf);
			cpt_clear_mbox_intr(cpt, vf);
		}
	}
}