1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
2 | /* Copyright (c) 2017 - 2021 Intel Corporation */ |
3 | #include "osdep.h" |
4 | #include "type.h" |
5 | #include "icrdma_hw.h" |
6 | |
/* MMIO register offsets for gen-2 (icrdma) hardware.
 * Entry order is significant: icrdma_init_hw() indexes this table with the
 * IRDMA_* register enum (presumably declared in type.h — TODO confirm the
 * enum order matches this table).  The first group (through the DB offset)
 * are PF PE registers; the second group are global/glue registers.
 */
static u32 icrdma_regs[IRDMA_MAX_REGS] = {
	PFPE_CQPTAIL,
	PFPE_CQPDB,
	PFPE_CCQPSTATUS,
	PFPE_CCQPHIGH,
	PFPE_CCQPLOW,
	PFPE_CQARM,
	PFPE_CQACK,
	PFPE_AEQALLOC,
	PFPE_CQPERRCODES,
	PFPE_WQEALLOC,
	GLINT_DYN_CTL(0),
	ICRDMA_DB_ADDR_OFFSET,	/* doorbell offset; kept as raw offset (see icrdma_init_hw) */

	GLPCI_LBARCTRL,
	GLPE_CPUSTATUS0,
	GLPE_CPUSTATUS1,
	GLPE_CPUSTATUS2,
	PFINT_AEQCTL,
	GLINT_CEQCTL(0),
	VSIQF_PE_CTL1(0),
	PFHMC_PDINV,
	GLHMC_VFPDINV(0),
	GLPE_CRITERR,
	GLINT_RATE(0),
};
33 | |
/* Per-field bitmasks for gen-2 hardware, copied into dev->hw_masks by
 * icrdma_init_hw().  Order must match the corresponding mask-index enum
 * (and the icrdma_shifts table below) — TODO confirm against type.h.
 */
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE,
	ICRDMA_CCQPSTATUS_CCQP_ERR,
	ICRDMA_CQPSQ_STAG_PDID,
	ICRDMA_CQPSQ_CQ_CEQID,
	ICRDMA_CQPSQ_CQ_CQID,
	ICRDMA_COMMIT_FPM_CQCNT,
};
42 | |
/* Bit-shift amounts paired one-to-one with icrdma_masks above; copied into
 * dev->hw_shifts by icrdma_init_hw().  Keep the two tables in the same order.
 */
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE_S,
	ICRDMA_CCQPSTATUS_CCQP_ERR_S,
	ICRDMA_CQPSQ_STAG_PDID_S,
	ICRDMA_CQPSQ_CQ_CEQID_S,
	ICRDMA_CQPSQ_CQ_CQID_S,
	ICRDMA_COMMIT_FPM_CQCNT_S,
};
51 | |
52 | /** |
53 | * icrdma_ena_irq - Enable interrupt |
54 | * @dev: pointer to the device structure |
55 | * @idx: vector index |
56 | */ |
57 | static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx) |
58 | { |
59 | u32 val; |
60 | u32 interval = 0; |
61 | |
62 | if (dev->ceq_itr && dev->aeq->msix_idx != idx) |
63 | interval = dev->ceq_itr >> 1; /* 2 usec units */ |
64 | val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) | |
65 | FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) | |
66 | FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) | |
67 | FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1); |
68 | |
69 | if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) |
70 | writel(val, addr: dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx); |
71 | else |
72 | writel(val, addr: dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1)); |
73 | } |
74 | |
75 | /** |
76 | * icrdma_disable_irq - Disable interrupt |
77 | * @dev: pointer to the device structure |
78 | * @idx: vector index |
79 | */ |
80 | static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx) |
81 | { |
82 | if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) |
83 | writel(val: 0, addr: dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx); |
84 | else |
85 | writel(val: 0, addr: dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1)); |
86 | } |
87 | |
88 | /** |
89 | * icrdma_cfg_ceq- Configure CEQ interrupt |
90 | * @dev: pointer to the device structure |
91 | * @ceq_id: Completion Event Queue ID |
92 | * @idx: vector index |
93 | * @enable: True to enable, False disables |
94 | */ |
95 | static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx, |
96 | bool enable) |
97 | { |
98 | u32 reg_val; |
99 | |
100 | reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) | |
101 | FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) | |
102 | FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3); |
103 | |
104 | writel(val: reg_val, addr: dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id); |
105 | } |
106 | |
/* Interrupt-ops dispatch table for gen-2 hardware.  AEQ configuration uses
 * the common irdma_cfg_aeq helper (defined elsewhere); the remaining ops
 * are the icrdma-specific routines above.
 */
static const struct irdma_irq_ops icrdma_irq_ops = {
	.irdma_cfg_aeq = irdma_cfg_aeq,
	.irdma_cfg_ceq = icrdma_cfg_ceq,
	.irdma_dis_irq = icrdma_disable_irq,
	.irdma_en_irq = icrdma_ena_irq,
};
113 | |
114 | static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = { |
115 | [IRDMA_HW_STAT_INDEX_RXVLANERR] = { .byteoff: 0, .bitoff: 32, IRDMA_MAX_STATS_24 }, |
116 | [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { .byteoff: 8, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
117 | [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { .byteoff: 16, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
118 | [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { .byteoff: 24, .bitoff: 32, IRDMA_MAX_STATS_32 }, |
119 | [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { .byteoff: 24, .bitoff: 0, IRDMA_MAX_STATS_32 }, |
120 | [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { .byteoff: 32, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
121 | [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { .byteoff: 40, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
122 | [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { .byteoff: 48, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
123 | [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { .byteoff: 56, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
124 | [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { .byteoff: 64, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
125 | [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { .byteoff: 72, .bitoff: 32, IRDMA_MAX_STATS_32 }, |
126 | [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { .byteoff: 72, .bitoff: 0, IRDMA_MAX_STATS_32 }, |
127 | [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { .byteoff: 80, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
128 | [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { .byteoff: 88, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
129 | [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { .byteoff: 96, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
130 | [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { .byteoff: 104, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
131 | [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { .byteoff: 112, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
132 | [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { .byteoff: 120, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
133 | [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { .byteoff: 128, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
134 | [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { .byteoff: 136, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
135 | [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { .byteoff: 144, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
136 | [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { .byteoff: 152, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
137 | [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { .byteoff: 160, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
138 | [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { .byteoff: 168, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
139 | [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { .byteoff: 176, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
140 | [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { .byteoff: 184, .bitoff: 32, IRDMA_MAX_STATS_24 }, |
141 | [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { .byteoff: 184, .bitoff: 0, IRDMA_MAX_STATS_24 }, |
142 | [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { .byteoff: 192, .bitoff: 32, IRDMA_MAX_STATS_48 }, |
143 | [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { .byteoff: 200, .bitoff: 32, IRDMA_MAX_STATS_24 }, |
144 | [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { .byteoff: 200, .bitoff: 0, IRDMA_MAX_STATS_24 }, |
145 | [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { .byteoff: 208, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
146 | [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { .byteoff: 216, .bitoff: 32, IRDMA_MAX_STATS_32 }, |
147 | [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { .byteoff: 224, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
148 | [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { .byteoff: 232, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
149 | [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { .byteoff: 240, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
150 | [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { .byteoff: 248, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
151 | [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { .byteoff: 256, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
152 | [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { .byteoff: 264, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
153 | [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { .byteoff: 272, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
154 | [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { .byteoff: 280, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
155 | [IRDMA_HW_STAT_INDEX_RDMAVBND] = { .byteoff: 288, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
156 | [IRDMA_HW_STAT_INDEX_RDMAVINV] = { .byteoff: 296, .bitoff: 0, IRDMA_MAX_STATS_48 }, |
157 | [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { .byteoff: 304, .bitoff: 0, IRDMA_MAX_STATS_56 }, |
158 | [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { .byteoff: 312, .bitoff: 32, IRDMA_MAX_STATS_24 }, |
159 | [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { .byteoff: 312, .bitoff: 0, IRDMA_MAX_STATS_32 }, |
160 | [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { .byteoff: 320, .bitoff: 0, IRDMA_MAX_STATS_32 }, |
161 | }; |
162 | |
163 | void icrdma_init_hw(struct irdma_sc_dev *dev) |
164 | { |
165 | int i; |
166 | u8 __iomem *hw_addr; |
167 | |
168 | for (i = 0; i < IRDMA_MAX_REGS; ++i) { |
169 | hw_addr = dev->hw->hw_addr; |
170 | |
171 | if (i == IRDMA_DB_ADDR_OFFSET) |
172 | hw_addr = NULL; |
173 | |
174 | dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]); |
175 | } |
176 | dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID; |
177 | dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID; |
178 | |
179 | for (i = 0; i < IRDMA_MAX_SHIFTS; ++i) |
180 | dev->hw_shifts[i] = icrdma_shifts[i]; |
181 | |
182 | for (i = 0; i < IRDMA_MAX_MASKS; ++i) |
183 | dev->hw_masks[i] = icrdma_masks[i]; |
184 | |
185 | dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC]; |
186 | dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM]; |
187 | dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC]; |
188 | dev->cqp_db = dev->hw_regs[IRDMA_CQPDB]; |
189 | dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; |
190 | dev->irq_ops = &icrdma_irq_ops; |
191 | dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; |
192 | dev->hw_stats_map = icrdma_hw_stat_map; |
193 | dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; |
194 | dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; |
195 | dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; |
196 | dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2; |
197 | |
198 | dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE; |
199 | dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR; |
200 | dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE | |
201 | IRDMA_FEATURE_CQ_RESIZE; |
202 | } |
203 | |