// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
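
/*
 * In this table-driven scheme, @e is the event+umask encoding that an
 * incoming event is matched against, @m is the mask applied to the event
 * config before the comparison, and @i is a bitmask of the filter fields
 * the event needs (0x1 TID, 0x2 NID, 0x4 STATE, 0x8 OPC, as decoded by
 * snbep_cbox_filter_mask()); snbep_cbox_hw_config() below ORs together
 * the idx bits of every matching entry.
 */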

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
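
/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit-wide field of x; e.g.
 * __BITS_VALUE(0xabcd, 1, 8) == 0xab. The shared-register code below
 * uses it to treat a single atomic_t as an array of small per-field
 * reference counts (6-bit fields for the Cbox filter, 8-bit fields for
 * the PCU filter).
 */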

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  | Default | Description
 * | [63]  |    00h  | VALID - When set, indicates the CPU bus
 *                     numbers have been initialized. (RO)
 * |[62:48]|    ---  | Reserved
 * |[47:40]|    00h  | BUS_NUM_5 - Return the bus number BIOS assigned
 *                     CPUBUSNO(5). (RO)
 * |[39:32]|    00h  | BUS_NUM_4 - Return the bus number BIOS assigned
 *                     CPUBUSNO(4). (RO)
 * |[31:24]|    00h  | BUS_NUM_3 - Return the bus number BIOS assigned
 *                     CPUBUSNO(3). (RO)
 * |[23:16]|    00h  | BUS_NUM_2 - Return the bus number BIOS assigned
 *                     CPUBUSNO(2). (RO)
 * |[15:8] |    00h  | BUS_NUM_1 - Return the bus number BIOS assigned
 *                     CPUBUSNO(1). (RO)
 * | [7:0] |    00h  | BUS_NUM_0 - Return the bus number BIOS assigned
 *                     CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
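
/*
 * Illustrative sketch only (not part of the driver): assuming the MSR
 * layout documented above, the bus number BIOS assigned to CPUBUSNO(n)
 * sits in an 8-bit field at a BUS_NUM_STRIDE offset. The helper name is
 * hypothetical.
 */
static inline u8 skx_cpubusno_sketch(u64 msr_val, int n)
{
	/* bus numbers are only meaningful once the VALID bit is set */
	if (!(msr_val & SKX_MSR_CPU_BUS_VALID_BIT))
		return 0;
	return (msr_val >> (n * BUS_NUM_STRIDE)) & 0xff;
}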

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID	0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)	(((data) >> 4) & 0x7)
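
/*
 * For example, SAD_CONTROL_STACK_ID(0x35) == 0x3: bits [6:4] of the
 * SAD_CONTROL_CFG value carry the I/O stack id.
 */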

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff
#define ICX_UBOX_DID				0x3450

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff
#define SPR_UBOX_DID				0x3250

/* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
		snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/* Each three-bit field in bits 0-23 of the GIDNIDMAP register corresponds to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)
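
/*
 * For example, with a mapping register value of 0xfac688 (fields 0-7
 * holding the values 0-7 in order), GIDNIDMAP(0xfac688, 5) == 5.
 */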

static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
			      int *nodeid, int *groupid)
{
	int ret;

	/* get the Node ID of the local register */
	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
	if (ret)
		goto err;

	*nodeid = *nodeid & NODE_ID_MASK;
	/* get the Node ID mapping */
	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
	if (ret)
		goto err;
err:
	return ret;
}

static int topology_gidnid_map(int nodeid, u32 gidnid)
{
	int i, die_id = -1;

	/*
	 * every three bits in the Node ID mapping register maps
	 * to a particular node.
	 */
	for (i = 0; i < 8; i++) {
		if (nodeid == GIDNIDMAP(gidnid, i)) {
			if (topology_max_dies_per_package() > 1)
				die_id = i;
			else
				die_id = topology_phys_to_logical_pkg(i);
			if (die_id < 0)
				die_id = -ENODEV;
			break;
		}
	}

	return die_id;
}
1421 | |
1422 | /* |
1423 | * build pci bus to socket mapping |
1424 | */ |
1425 | static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse) |
1426 | { |
1427 | struct pci_dev *ubox_dev = NULL; |
1428 | int i, bus, nodeid, segment, die_id; |
1429 | struct pci2phy_map *map; |
1430 | int err = 0; |
1431 | u32 config = 0; |
1432 | |
1433 | while (1) { |
1434 | /* find the UBOX device */ |
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1436 | if (!ubox_dev) |
1437 | break; |
1438 | bus = ubox_dev->bus->number; |
1439 | /* |
1440 | * The nodeid and idmap registers only contain enough |
1441 | * information to handle 8 nodes. On systems with more |
1442 | * than 8 nodes, we need to rely on NUMA information, |
1443 | * filled in from BIOS supplied information, to determine |
1444 | * the topology. |
1445 | */ |
1446 | if (nr_node_ids <= 8) { |
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
1449 | if (err) |
1450 | break; |
1451 | |
			segment = pci_domain_nr(ubox_dev->bus);
1453 | raw_spin_lock(&pci2phy_map_lock); |
1454 | map = __find_pci2phy_map(segment); |
1455 | if (!map) { |
1456 | raw_spin_unlock(&pci2phy_map_lock); |
1457 | err = -ENOMEM; |
1458 | break; |
1459 | } |
1460 | |
1461 | map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, gidnid: config); |
1462 | raw_spin_unlock(&pci2phy_map_lock); |
1463 | } else { |
			segment = pci_domain_nr(ubox_dev->bus);
1465 | raw_spin_lock(&pci2phy_map_lock); |
1466 | map = __find_pci2phy_map(segment); |
1467 | if (!map) { |
1468 | raw_spin_unlock(&pci2phy_map_lock); |
1469 | err = -ENOMEM; |
1470 | break; |
1471 | } |
1472 | |
1473 | map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(dev: ubox_dev); |
1474 | |
1475 | raw_spin_unlock(&pci2phy_map_lock); |
1476 | |
1477 | if (WARN_ON_ONCE(die_id == -1)) { |
1478 | err = -EINVAL; |
1479 | break; |
1480 | } |
1481 | } |
1482 | } |
1483 | |
1484 | if (!err) { |
1485 | /* |
1486 | * For PCI bus with no UBOX device, find the next bus |
1487 | * that has UBOX device and use its mapping. |
1488 | */ |
1489 | raw_spin_lock(&pci2phy_map_lock); |
1490 | list_for_each_entry(map, &pci2phy_map_head, list) { |
1491 | i = -1; |
1492 | if (reverse) { |
1493 | for (bus = 255; bus >= 0; bus--) { |
1494 | if (map->pbus_to_dieid[bus] != -1) |
1495 | i = map->pbus_to_dieid[bus]; |
1496 | else |
1497 | map->pbus_to_dieid[bus] = i; |
1498 | } |
1499 | } else { |
1500 | for (bus = 0; bus <= 255; bus++) { |
1501 | if (map->pbus_to_dieid[bus] != -1) |
1502 | i = map->pbus_to_dieid[bus]; |
1503 | else |
1504 | map->pbus_to_dieid[bus] = i; |
1505 | } |
1506 | } |
1507 | } |
1508 | raw_spin_unlock(&pci2phy_map_lock); |
1509 | } |
1510 | |
	pci_dev_put(ubox_dev);
1512 | |
1513 | return pcibios_err_to_errno(err); |
1514 | } |
1515 | |
1516 | int snbep_uncore_pci_init(void) |
1517 | { |
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1519 | if (ret) |
1520 | return ret; |
1521 | uncore_pci_uncores = snbep_pci_uncores; |
1522 | uncore_pci_driver = &snbep_uncore_pci_driver; |
1523 | return 0; |
1524 | } |
1525 | /* end of Sandy Bridge-EP uncore support */ |
1526 | |
1527 | /* IvyTown uncore support */ |
1528 | static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box) |
1529 | { |
1530 | unsigned msr = uncore_msr_box_ctl(box); |
1531 | if (msr) |
1532 | wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT); |
1533 | } |
1534 | |
1535 | static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) |
1536 | { |
1537 | struct pci_dev *pdev = box->pci_dev; |
1538 | |
	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1540 | } |
1541 | |
1542 | #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \ |
1543 | .init_box = ivbep_uncore_msr_init_box, \ |
1544 | .disable_box = snbep_uncore_msr_disable_box, \ |
1545 | .enable_box = snbep_uncore_msr_enable_box, \ |
1546 | .disable_event = snbep_uncore_msr_disable_event, \ |
1547 | .enable_event = snbep_uncore_msr_enable_event, \ |
1548 | .read_counter = uncore_msr_read_counter |
1549 | |
1550 | static struct intel_uncore_ops ivbep_uncore_msr_ops = { |
1551 | IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
1552 | }; |
1553 | |
1554 | static struct intel_uncore_ops ivbep_uncore_pci_ops = { |
1555 | .init_box = ivbep_uncore_pci_init_box, |
1556 | .disable_box = snbep_uncore_pci_disable_box, |
1557 | .enable_box = snbep_uncore_pci_enable_box, |
1558 | .disable_event = snbep_uncore_pci_disable_event, |
1559 | .enable_event = snbep_uncore_pci_enable_event, |
1560 | .read_counter = snbep_uncore_pci_read_counter, |
1561 | }; |
1562 | |
1563 | #define IVBEP_UNCORE_PCI_COMMON_INIT() \ |
1564 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ |
1565 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ |
1566 | .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \ |
1567 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ |
1568 | .ops = &ivbep_uncore_pci_ops, \ |
1569 | .format_group = &ivbep_uncore_format_group |
1570 | |
1571 | static struct attribute *ivbep_uncore_formats_attr[] = { |
1572 | &format_attr_event.attr, |
1573 | &format_attr_umask.attr, |
1574 | &format_attr_edge.attr, |
1575 | &format_attr_inv.attr, |
1576 | &format_attr_thresh8.attr, |
1577 | NULL, |
1578 | }; |
1579 | |
1580 | static struct attribute *ivbep_uncore_ubox_formats_attr[] = { |
1581 | &format_attr_event.attr, |
1582 | &format_attr_umask.attr, |
1583 | &format_attr_edge.attr, |
1584 | &format_attr_inv.attr, |
1585 | &format_attr_thresh5.attr, |
1586 | NULL, |
1587 | }; |
1588 | |
1589 | static struct attribute *ivbep_uncore_cbox_formats_attr[] = { |
1590 | &format_attr_event.attr, |
1591 | &format_attr_umask.attr, |
1592 | &format_attr_edge.attr, |
1593 | &format_attr_tid_en.attr, |
1594 | &format_attr_thresh8.attr, |
1595 | &format_attr_filter_tid.attr, |
1596 | &format_attr_filter_link.attr, |
1597 | &format_attr_filter_state2.attr, |
1598 | &format_attr_filter_nid2.attr, |
1599 | &format_attr_filter_opc2.attr, |
1600 | &format_attr_filter_nc.attr, |
1601 | &format_attr_filter_c6.attr, |
1602 | &format_attr_filter_isoc.attr, |
1603 | NULL, |
1604 | }; |
1605 | |
1606 | static struct attribute *ivbep_uncore_pcu_formats_attr[] = { |
1607 | &format_attr_event.attr, |
1608 | &format_attr_occ_sel.attr, |
1609 | &format_attr_edge.attr, |
1610 | &format_attr_thresh5.attr, |
1611 | &format_attr_occ_invert.attr, |
1612 | &format_attr_occ_edge.attr, |
1613 | &format_attr_filter_band0.attr, |
1614 | &format_attr_filter_band1.attr, |
1615 | &format_attr_filter_band2.attr, |
1616 | &format_attr_filter_band3.attr, |
1617 | NULL, |
1618 | }; |
1619 | |
1620 | static struct attribute *ivbep_uncore_qpi_formats_attr[] = { |
1621 | &format_attr_event_ext.attr, |
1622 | &format_attr_umask.attr, |
1623 | &format_attr_edge.attr, |
1624 | &format_attr_thresh8.attr, |
1625 | &format_attr_match_rds.attr, |
1626 | &format_attr_match_rnid30.attr, |
1627 | &format_attr_match_rnid4.attr, |
1628 | &format_attr_match_dnid.attr, |
1629 | &format_attr_match_mc.attr, |
1630 | &format_attr_match_opc.attr, |
1631 | &format_attr_match_vnw.attr, |
1632 | &format_attr_match0.attr, |
1633 | &format_attr_match1.attr, |
1634 | &format_attr_mask_rds.attr, |
1635 | &format_attr_mask_rnid30.attr, |
1636 | &format_attr_mask_rnid4.attr, |
1637 | &format_attr_mask_dnid.attr, |
1638 | &format_attr_mask_mc.attr, |
1639 | &format_attr_mask_opc.attr, |
1640 | &format_attr_mask_vnw.attr, |
1641 | &format_attr_mask0.attr, |
1642 | &format_attr_mask1.attr, |
1643 | NULL, |
1644 | }; |
1645 | |
1646 | static const struct attribute_group ivbep_uncore_format_group = { |
1647 | .name = "format" , |
1648 | .attrs = ivbep_uncore_formats_attr, |
1649 | }; |
1650 | |
1651 | static const struct attribute_group ivbep_uncore_ubox_format_group = { |
1652 | .name = "format" , |
1653 | .attrs = ivbep_uncore_ubox_formats_attr, |
1654 | }; |
1655 | |
1656 | static const struct attribute_group ivbep_uncore_cbox_format_group = { |
1657 | .name = "format" , |
1658 | .attrs = ivbep_uncore_cbox_formats_attr, |
1659 | }; |
1660 | |
1661 | static const struct attribute_group ivbep_uncore_pcu_format_group = { |
1662 | .name = "format" , |
1663 | .attrs = ivbep_uncore_pcu_formats_attr, |
1664 | }; |
1665 | |
1666 | static const struct attribute_group ivbep_uncore_qpi_format_group = { |
1667 | .name = "format" , |
1668 | .attrs = ivbep_uncore_qpi_formats_attr, |
1669 | }; |
1670 | |
1671 | static struct intel_uncore_type ivbep_uncore_ubox = { |
1672 | .name = "ubox" , |
1673 | .num_counters = 2, |
1674 | .num_boxes = 1, |
1675 | .perf_ctr_bits = 44, |
1676 | .fixed_ctr_bits = 48, |
1677 | .perf_ctr = SNBEP_U_MSR_PMON_CTR0, |
1678 | .event_ctl = SNBEP_U_MSR_PMON_CTL0, |
1679 | .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK, |
1680 | .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, |
1681 | .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, |
1682 | .ops = &ivbep_uncore_msr_ops, |
1683 | .format_group = &ivbep_uncore_ubox_format_group, |
1684 | }; |
1685 | |
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1687 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, |
1688 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), |
1689 | SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), |
1690 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), |
1691 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), |
1692 | SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), |
1693 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), |
1694 | SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), |
1695 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), |
1696 | SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), |
1697 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), |
1698 | SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), |
1699 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), |
1700 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), |
1701 | SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), |
1702 | SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), |
1703 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), |
1704 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), |
1705 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), |
1706 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), |
1707 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), |
1708 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), |
1709 | SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), |
1710 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), |
1711 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), |
1712 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), |
1713 | SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), |
1714 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), |
1715 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), |
1716 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), |
1717 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), |
1718 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), |
1719 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), |
1720 | SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), |
1721 | SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), |
1722 | SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), |
1723 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), |
1724 | EVENT_EXTRA_END |
1725 | }; |
1726 | |
1727 | static u64 ivbep_cbox_filter_mask(int fields) |
1728 | { |
1729 | u64 mask = 0; |
1730 | |
1731 | if (fields & 0x1) |
1732 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID; |
1733 | if (fields & 0x2) |
1734 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK; |
1735 | if (fields & 0x4) |
1736 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE; |
1737 | if (fields & 0x8) |
1738 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID; |
1739 | if (fields & 0x10) { |
1740 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC; |
1741 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC; |
1742 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6; |
1743 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC; |
1744 | } |
1745 | |
1746 | return mask; |
1747 | } |
1748 | |
1749 | static struct event_constraint * |
1750 | ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) |
1751 | { |
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1753 | } |
1754 | |
1755 | static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
1756 | { |
1757 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
1758 | struct extra_reg *er; |
1759 | int idx = 0; |
1760 | |
1761 | for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) { |
1762 | if (er->event != (event->hw.config & er->config_mask)) |
1763 | continue; |
1764 | idx |= er->idx; |
1765 | } |
1766 | |
1767 | if (idx) { |
1768 | reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + |
1769 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; |
1770 | reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(fields: idx); |
1771 | reg1->idx = idx; |
1772 | } |
1773 | return 0; |
1774 | } |
1775 | |
1776 | static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
1777 | { |
1778 | struct hw_perf_event *hwc = &event->hw; |
1779 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
1780 | |
1781 | if (reg1->idx != EXTRA_REG_NONE) { |
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
1785 | } |
1786 | |
1787 | wrmsrl(msr: hwc->config_base, val: hwc->config | SNBEP_PMON_CTL_EN); |
1788 | } |
1789 | |
1790 | static struct intel_uncore_ops ivbep_uncore_cbox_ops = { |
1791 | .init_box = ivbep_uncore_msr_init_box, |
1792 | .disable_box = snbep_uncore_msr_disable_box, |
1793 | .enable_box = snbep_uncore_msr_enable_box, |
1794 | .disable_event = snbep_uncore_msr_disable_event, |
1795 | .enable_event = ivbep_cbox_enable_event, |
1796 | .read_counter = uncore_msr_read_counter, |
1797 | .hw_config = ivbep_cbox_hw_config, |
1798 | .get_constraint = ivbep_cbox_get_constraint, |
1799 | .put_constraint = snbep_cbox_put_constraint, |
1800 | }; |
1801 | |
1802 | static struct intel_uncore_type ivbep_uncore_cbox = { |
1803 | .name = "cbox" , |
1804 | .num_counters = 4, |
1805 | .num_boxes = 15, |
1806 | .perf_ctr_bits = 44, |
1807 | .event_ctl = SNBEP_C0_MSR_PMON_CTL0, |
1808 | .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, |
1809 | .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK, |
1810 | .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, |
1811 | .msr_offset = SNBEP_CBO_MSR_OFFSET, |
1812 | .num_shared_regs = 1, |
1813 | .constraints = snbep_uncore_cbox_constraints, |
1814 | .ops = &ivbep_uncore_cbox_ops, |
1815 | .format_group = &ivbep_uncore_cbox_format_group, |
1816 | }; |
1817 | |
1818 | static struct intel_uncore_ops ivbep_uncore_pcu_ops = { |
1819 | IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
1820 | .hw_config = snbep_pcu_hw_config, |
1821 | .get_constraint = snbep_pcu_get_constraint, |
1822 | .put_constraint = snbep_pcu_put_constraint, |
1823 | }; |
1824 | |
1825 | static struct intel_uncore_type ivbep_uncore_pcu = { |
1826 | .name = "pcu" , |
1827 | .num_counters = 4, |
1828 | .num_boxes = 1, |
1829 | .perf_ctr_bits = 48, |
1830 | .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, |
1831 | .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, |
1832 | .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK, |
1833 | .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, |
1834 | .num_shared_regs = 1, |
1835 | .ops = &ivbep_uncore_pcu_ops, |
1836 | .format_group = &ivbep_uncore_pcu_format_group, |
1837 | }; |
1838 | |
1839 | static struct intel_uncore_type *ivbep_msr_uncores[] = { |
1840 | &ivbep_uncore_ubox, |
1841 | &ivbep_uncore_cbox, |
1842 | &ivbep_uncore_pcu, |
1843 | NULL, |
1844 | }; |
1845 | |
1846 | void ivbep_uncore_cpu_init(void) |
1847 | { |
1848 | if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package()) |
1849 | ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package(); |
1850 | uncore_msr_uncores = ivbep_msr_uncores; |
1851 | } |
1852 | |
1853 | static struct intel_uncore_type ivbep_uncore_ha = { |
1854 | .name = "ha" , |
1855 | .num_counters = 4, |
1856 | .num_boxes = 2, |
1857 | .perf_ctr_bits = 48, |
1858 | IVBEP_UNCORE_PCI_COMMON_INIT(), |
1859 | }; |
1860 | |
1861 | static struct intel_uncore_type ivbep_uncore_imc = { |
1862 | .name = "imc" , |
1863 | .num_counters = 4, |
1864 | .num_boxes = 8, |
1865 | .perf_ctr_bits = 48, |
1866 | .fixed_ctr_bits = 48, |
1867 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, |
1868 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, |
1869 | .event_descs = snbep_uncore_imc_events, |
1870 | IVBEP_UNCORE_PCI_COMMON_INIT(), |
1871 | }; |
1872 | |
1873 | /* registers in IRP boxes are not properly aligned */ |
1874 | static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; |
1875 | static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; |
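/* Each counter index thus has its own offset; base + stride doesn't apply. */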
1876 | |
1877 | static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
1878 | { |
1879 | struct pci_dev *pdev = box->pci_dev; |
1880 | struct hw_perf_event *hwc = &event->hw; |
1881 | |
	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
1884 | } |
1885 | |
1886 | static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) |
1887 | { |
1888 | struct pci_dev *pdev = box->pci_dev; |
1889 | struct hw_perf_event *hwc = &event->hw; |
1890 | |
	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1892 | } |
1893 | |
1894 | static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
1895 | { |
1896 | struct pci_dev *pdev = box->pci_dev; |
1897 | struct hw_perf_event *hwc = &event->hw; |
1898 | u64 count = 0; |
1899 | |
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1902 | |
1903 | return count; |
1904 | } |
1905 | |
1906 | static struct intel_uncore_ops ivbep_uncore_irp_ops = { |
1907 | .init_box = ivbep_uncore_pci_init_box, |
1908 | .disable_box = snbep_uncore_pci_disable_box, |
1909 | .enable_box = snbep_uncore_pci_enable_box, |
1910 | .disable_event = ivbep_uncore_irp_disable_event, |
1911 | .enable_event = ivbep_uncore_irp_enable_event, |
1912 | .read_counter = ivbep_uncore_irp_read_counter, |
1913 | }; |
1914 | |
1915 | static struct intel_uncore_type ivbep_uncore_irp = { |
1916 | .name = "irp" , |
1917 | .num_counters = 4, |
1918 | .num_boxes = 1, |
1919 | .perf_ctr_bits = 48, |
1920 | .event_mask = IVBEP_PMON_RAW_EVENT_MASK, |
1921 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
1922 | .ops = &ivbep_uncore_irp_ops, |
1923 | .format_group = &ivbep_uncore_format_group, |
1924 | }; |
1925 | |
1926 | static struct intel_uncore_ops ivbep_uncore_qpi_ops = { |
1927 | .init_box = ivbep_uncore_pci_init_box, |
1928 | .disable_box = snbep_uncore_pci_disable_box, |
1929 | .enable_box = snbep_uncore_pci_enable_box, |
1930 | .disable_event = snbep_uncore_pci_disable_event, |
1931 | .enable_event = snbep_qpi_enable_event, |
1932 | .read_counter = snbep_uncore_pci_read_counter, |
1933 | .hw_config = snbep_qpi_hw_config, |
1934 | .get_constraint = uncore_get_constraint, |
1935 | .put_constraint = uncore_put_constraint, |
1936 | }; |
1937 | |
1938 | static struct intel_uncore_type ivbep_uncore_qpi = { |
1939 | .name = "qpi" , |
1940 | .num_counters = 4, |
1941 | .num_boxes = 3, |
1942 | .perf_ctr_bits = 48, |
1943 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
1944 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
1945 | .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK, |
1946 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
1947 | .num_shared_regs = 1, |
1948 | .ops = &ivbep_uncore_qpi_ops, |
1949 | .format_group = &ivbep_uncore_qpi_format_group, |
1950 | }; |
1951 | |
1952 | static struct intel_uncore_type ivbep_uncore_r2pcie = { |
1953 | .name = "r2pcie" , |
1954 | .num_counters = 4, |
1955 | .num_boxes = 1, |
1956 | .perf_ctr_bits = 44, |
1957 | .constraints = snbep_uncore_r2pcie_constraints, |
1958 | IVBEP_UNCORE_PCI_COMMON_INIT(), |
1959 | }; |
1960 | |
1961 | static struct intel_uncore_type ivbep_uncore_r3qpi = { |
1962 | .name = "r3qpi" , |
1963 | .num_counters = 3, |
1964 | .num_boxes = 2, |
1965 | .perf_ctr_bits = 44, |
1966 | .constraints = snbep_uncore_r3qpi_constraints, |
1967 | IVBEP_UNCORE_PCI_COMMON_INIT(), |
1968 | }; |
1969 | |
1970 | enum { |
1971 | IVBEP_PCI_UNCORE_HA, |
1972 | IVBEP_PCI_UNCORE_IMC, |
1973 | IVBEP_PCI_UNCORE_IRP, |
1974 | IVBEP_PCI_UNCORE_QPI, |
1975 | IVBEP_PCI_UNCORE_R2PCIE, |
1976 | IVBEP_PCI_UNCORE_R3QPI, |
1977 | }; |
1978 | |
1979 | static struct intel_uncore_type *ivbep_pci_uncores[] = { |
1980 | [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha, |
1981 | [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc, |
1982 | [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp, |
1983 | [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi, |
1984 | [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie, |
1985 | [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi, |
1986 | NULL, |
1987 | }; |
1988 | |
1989 | static const struct pci_device_id ivbep_uncore_pci_ids[] = { |
1990 | { /* Home Agent 0 */ |
1991 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), |
1992 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0), |
1993 | }, |
1994 | { /* Home Agent 1 */ |
1995 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), |
1996 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1), |
1997 | }, |
1998 | { /* MC0 Channel 0 */ |
1999 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), |
2000 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0), |
2001 | }, |
2002 | { /* MC0 Channel 1 */ |
2003 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), |
2004 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1), |
2005 | }, |
2006 | { /* MC0 Channel 3 */ |
2007 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), |
2008 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2), |
2009 | }, |
2010 | { /* MC0 Channel 4 */ |
2011 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), |
2012 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3), |
2013 | }, |
2014 | { /* MC1 Channel 0 */ |
2015 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), |
2016 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4), |
2017 | }, |
2018 | { /* MC1 Channel 1 */ |
2019 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), |
2020 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5), |
2021 | }, |
2022 | { /* MC1 Channel 3 */ |
2023 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), |
2024 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6), |
2025 | }, |
2026 | { /* MC1 Channel 4 */ |
2027 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), |
2028 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7), |
2029 | }, |
2030 | { /* IRP */ |
2031 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39), |
2032 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0), |
2033 | }, |
2034 | { /* QPI0 Port 0 */ |
2035 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), |
2036 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0), |
2037 | }, |
2038 | { /* QPI0 Port 1 */ |
2039 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), |
2040 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1), |
2041 | }, |
2042 | { /* QPI1 Port 2 */ |
2043 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), |
2044 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2), |
2045 | }, |
2046 | { /* R2PCIe */ |
2047 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), |
2048 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0), |
2049 | }, |
2050 | { /* R3QPI0 Link 0 */ |
2051 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), |
2052 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0), |
2053 | }, |
2054 | { /* R3QPI0 Link 1 */ |
2055 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), |
2056 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1), |
2057 | }, |
2058 | { /* R3QPI1 Link 2 */ |
2059 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), |
2060 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2), |
2061 | }, |
2062 | { /* QPI Port 0 filter */ |
2063 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86), |
2064 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
2065 | SNBEP_PCI_QPI_PORT0_FILTER), |
2066 | }, |
2067 | { /* QPI Port 0 filter */ |
2068 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96), |
2069 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
2070 | SNBEP_PCI_QPI_PORT1_FILTER), |
2071 | }, |
2072 | { /* end: all zeroes */ } |
2073 | }; |
2074 | |
2075 | static struct pci_driver ivbep_uncore_pci_driver = { |
2076 | .name = "ivbep_uncore" , |
2077 | .id_table = ivbep_uncore_pci_ids, |
2078 | }; |
2079 | |
2080 | int ivbep_uncore_pci_init(void) |
2081 | { |
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2083 | if (ret) |
2084 | return ret; |
2085 | uncore_pci_uncores = ivbep_pci_uncores; |
2086 | uncore_pci_driver = &ivbep_uncore_pci_driver; |
2087 | return 0; |
2088 | } |
2089 | /* end of IvyTown uncore support */ |
2090 | |
2091 | /* KNL uncore support */ |
2092 | static struct attribute *knl_uncore_ubox_formats_attr[] = { |
2093 | &format_attr_event.attr, |
2094 | &format_attr_umask.attr, |
2095 | &format_attr_edge.attr, |
2096 | &format_attr_tid_en.attr, |
2097 | &format_attr_inv.attr, |
2098 | &format_attr_thresh5.attr, |
2099 | NULL, |
2100 | }; |
2101 | |
2102 | static const struct attribute_group knl_uncore_ubox_format_group = { |
2103 | .name = "format" , |
2104 | .attrs = knl_uncore_ubox_formats_attr, |
2105 | }; |
2106 | |
2107 | static struct intel_uncore_type knl_uncore_ubox = { |
2108 | .name = "ubox" , |
2109 | .num_counters = 2, |
2110 | .num_boxes = 1, |
2111 | .perf_ctr_bits = 48, |
2112 | .fixed_ctr_bits = 48, |
2113 | .perf_ctr = HSWEP_U_MSR_PMON_CTR0, |
2114 | .event_ctl = HSWEP_U_MSR_PMON_CTL0, |
2115 | .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK, |
2116 | .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, |
2117 | .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, |
2118 | .ops = &snbep_uncore_msr_ops, |
2119 | .format_group = &knl_uncore_ubox_format_group, |
2120 | }; |
2121 | |
2122 | static struct attribute *knl_uncore_cha_formats_attr[] = { |
2123 | &format_attr_event.attr, |
2124 | &format_attr_umask.attr, |
2125 | &format_attr_qor.attr, |
2126 | &format_attr_edge.attr, |
2127 | &format_attr_tid_en.attr, |
2128 | &format_attr_inv.attr, |
2129 | &format_attr_thresh8.attr, |
2130 | &format_attr_filter_tid4.attr, |
2131 | &format_attr_filter_link3.attr, |
2132 | &format_attr_filter_state4.attr, |
2133 | &format_attr_filter_local.attr, |
2134 | &format_attr_filter_all_op.attr, |
2135 | &format_attr_filter_nnm.attr, |
2136 | &format_attr_filter_opc3.attr, |
2137 | &format_attr_filter_nc.attr, |
2138 | &format_attr_filter_isoc.attr, |
2139 | NULL, |
2140 | }; |
2141 | |
2142 | static const struct attribute_group knl_uncore_cha_format_group = { |
2143 | .name = "format" , |
2144 | .attrs = knl_uncore_cha_formats_attr, |
2145 | }; |
2146 | |
2147 | static struct event_constraint knl_uncore_cha_constraints[] = { |
2148 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), |
2149 | UNCORE_EVENT_CONSTRAINT(0x1f, 0x1), |
2150 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), |
2151 | EVENT_CONSTRAINT_END |
2152 | }; |
2153 | |
static struct extra_reg knl_uncore_cha_extra_regs[] = {
2155 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, |
2156 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), |
2157 | SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2), |
2158 | SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4), |
2159 | SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4), |
2160 | EVENT_EXTRA_END |
2161 | }; |
2162 | |
2163 | static u64 knl_cha_filter_mask(int fields) |
2164 | { |
2165 | u64 mask = 0; |
2166 | |
2167 | if (fields & 0x1) |
2168 | mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID; |
2169 | if (fields & 0x2) |
2170 | mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE; |
2171 | if (fields & 0x4) |
2172 | mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP; |
2173 | return mask; |
2174 | } |
2175 | |
2176 | static struct event_constraint * |
2177 | knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) |
2178 | { |
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2180 | } |
2181 | |
2182 | static int knl_cha_hw_config(struct intel_uncore_box *box, |
2183 | struct perf_event *event) |
2184 | { |
2185 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
2186 | struct extra_reg *er; |
2187 | int idx = 0; |
2188 | |
2189 | for (er = knl_uncore_cha_extra_regs; er->msr; er++) { |
2190 | if (er->event != (event->hw.config & er->config_mask)) |
2191 | continue; |
2192 | idx |= er->idx; |
2193 | } |
2194 | |
2195 | if (idx) { |
2196 | reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + |
2197 | KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx; |
2198 | reg1->config = event->attr.config1 & knl_cha_filter_mask(fields: idx); |
2199 | |
2200 | reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE; |
2201 | reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE; |
2202 | reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC; |
2203 | reg1->idx = idx; |
2204 | } |
2205 | return 0; |
2206 | } |
2207 | |
2208 | static void hswep_cbox_enable_event(struct intel_uncore_box *box, |
2209 | struct perf_event *event); |
2210 | |
2211 | static struct intel_uncore_ops knl_uncore_cha_ops = { |
2212 | .init_box = snbep_uncore_msr_init_box, |
2213 | .disable_box = snbep_uncore_msr_disable_box, |
2214 | .enable_box = snbep_uncore_msr_enable_box, |
2215 | .disable_event = snbep_uncore_msr_disable_event, |
2216 | .enable_event = hswep_cbox_enable_event, |
2217 | .read_counter = uncore_msr_read_counter, |
2218 | .hw_config = knl_cha_hw_config, |
2219 | .get_constraint = knl_cha_get_constraint, |
2220 | .put_constraint = snbep_cbox_put_constraint, |
2221 | }; |
2222 | |
2223 | static struct intel_uncore_type knl_uncore_cha = { |
2224 | .name = "cha" , |
2225 | .num_counters = 4, |
2226 | .num_boxes = 38, |
2227 | .perf_ctr_bits = 48, |
2228 | .event_ctl = HSWEP_C0_MSR_PMON_CTL0, |
2229 | .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, |
2230 | .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK, |
2231 | .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, |
2232 | .msr_offset = KNL_CHA_MSR_OFFSET, |
2233 | .num_shared_regs = 1, |
2234 | .constraints = knl_uncore_cha_constraints, |
2235 | .ops = &knl_uncore_cha_ops, |
2236 | .format_group = &knl_uncore_cha_format_group, |
2237 | }; |
2238 | |
2239 | static struct attribute *knl_uncore_pcu_formats_attr[] = { |
2240 | &format_attr_event2.attr, |
2241 | &format_attr_use_occ_ctr.attr, |
2242 | &format_attr_occ_sel.attr, |
2243 | &format_attr_edge.attr, |
2244 | &format_attr_tid_en.attr, |
2245 | &format_attr_inv.attr, |
2246 | &format_attr_thresh6.attr, |
2247 | &format_attr_occ_invert.attr, |
2248 | &format_attr_occ_edge_det.attr, |
2249 | NULL, |
2250 | }; |
2251 | |
2252 | static const struct attribute_group knl_uncore_pcu_format_group = { |
2253 | .name = "format" , |
2254 | .attrs = knl_uncore_pcu_formats_attr, |
2255 | }; |
2256 | |
2257 | static struct intel_uncore_type knl_uncore_pcu = { |
2258 | .name = "pcu" , |
2259 | .num_counters = 4, |
2260 | .num_boxes = 1, |
2261 | .perf_ctr_bits = 48, |
2262 | .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, |
2263 | .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, |
2264 | .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK, |
2265 | .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, |
2266 | .ops = &snbep_uncore_msr_ops, |
2267 | .format_group = &knl_uncore_pcu_format_group, |
2268 | }; |
2269 | |
2270 | static struct intel_uncore_type *knl_msr_uncores[] = { |
2271 | &knl_uncore_ubox, |
2272 | &knl_uncore_cha, |
2273 | &knl_uncore_pcu, |
2274 | NULL, |
2275 | }; |
2276 | |
2277 | void knl_uncore_cpu_init(void) |
2278 | { |
2279 | uncore_msr_uncores = knl_msr_uncores; |
2280 | } |
2281 | |
2282 | static void knl_uncore_imc_enable_box(struct intel_uncore_box *box) |
2283 | { |
2284 | struct pci_dev *pdev = box->pci_dev; |
2285 | int box_ctl = uncore_pci_box_ctl(box); |
2286 | |
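	/*
	 * Writing 0 clears the freeze and reset control bits in the box
	 * control register, letting the counters run.
	 */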
	pci_write_config_dword(pdev, box_ctl, 0);
2288 | } |
2289 | |
2290 | static void knl_uncore_imc_enable_event(struct intel_uncore_box *box, |
2291 | struct perf_event *event) |
2292 | { |
2293 | struct pci_dev *pdev = box->pci_dev; |
2294 | struct hw_perf_event *hwc = &event->hw; |
2295 | |
2296 | if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK) |
2297 | == UNCORE_FIXED_EVENT) |
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
2303 | } |
2304 | |
2305 | static struct intel_uncore_ops knl_uncore_imc_ops = { |
2306 | .init_box = snbep_uncore_pci_init_box, |
2307 | .disable_box = snbep_uncore_pci_disable_box, |
2308 | .enable_box = knl_uncore_imc_enable_box, |
2309 | .read_counter = snbep_uncore_pci_read_counter, |
2310 | .enable_event = knl_uncore_imc_enable_event, |
2311 | .disable_event = snbep_uncore_pci_disable_event, |
2312 | }; |
2313 | |
2314 | static struct intel_uncore_type knl_uncore_imc_uclk = { |
2315 | .name = "imc_uclk" , |
2316 | .num_counters = 4, |
2317 | .num_boxes = 2, |
2318 | .perf_ctr_bits = 48, |
2319 | .fixed_ctr_bits = 48, |
2320 | .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW, |
2321 | .event_ctl = KNL_UCLK_MSR_PMON_CTL0, |
2322 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
2323 | .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW, |
2324 | .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL, |
2325 | .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL, |
2326 | .ops = &knl_uncore_imc_ops, |
2327 | .format_group = &snbep_uncore_format_group, |
2328 | }; |
2329 | |
2330 | static struct intel_uncore_type knl_uncore_imc_dclk = { |
2331 | .name = "imc" , |
2332 | .num_counters = 4, |
2333 | .num_boxes = 6, |
2334 | .perf_ctr_bits = 48, |
2335 | .fixed_ctr_bits = 48, |
2336 | .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW, |
2337 | .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0, |
2338 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
2339 | .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW, |
2340 | .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL, |
2341 | .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL, |
2342 | .ops = &knl_uncore_imc_ops, |
2343 | .format_group = &snbep_uncore_format_group, |
2344 | }; |
2345 | |
2346 | static struct intel_uncore_type knl_uncore_edc_uclk = { |
2347 | .name = "edc_uclk" , |
2348 | .num_counters = 4, |
2349 | .num_boxes = 8, |
2350 | .perf_ctr_bits = 48, |
2351 | .fixed_ctr_bits = 48, |
2352 | .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW, |
2353 | .event_ctl = KNL_UCLK_MSR_PMON_CTL0, |
2354 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
2355 | .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW, |
2356 | .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL, |
2357 | .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL, |
2358 | .ops = &knl_uncore_imc_ops, |
2359 | .format_group = &snbep_uncore_format_group, |
2360 | }; |
2361 | |
2362 | static struct intel_uncore_type knl_uncore_edc_eclk = { |
2363 | .name = "edc_eclk" , |
2364 | .num_counters = 4, |
2365 | .num_boxes = 8, |
2366 | .perf_ctr_bits = 48, |
2367 | .fixed_ctr_bits = 48, |
2368 | .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW, |
2369 | .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0, |
2370 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
2371 | .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW, |
2372 | .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL, |
2373 | .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL, |
2374 | .ops = &knl_uncore_imc_ops, |
2375 | .format_group = &snbep_uncore_format_group, |
2376 | }; |
2377 | |
2378 | static struct event_constraint knl_uncore_m2pcie_constraints[] = { |
2379 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), |
2380 | EVENT_CONSTRAINT_END |
2381 | }; |
2382 | |
2383 | static struct intel_uncore_type knl_uncore_m2pcie = { |
2384 | .name = "m2pcie" , |
2385 | .num_counters = 4, |
2386 | .num_boxes = 1, |
2387 | .perf_ctr_bits = 48, |
2388 | .constraints = knl_uncore_m2pcie_constraints, |
2389 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
2390 | }; |
2391 | |
2392 | static struct attribute *knl_uncore_irp_formats_attr[] = { |
2393 | &format_attr_event.attr, |
2394 | &format_attr_umask.attr, |
2395 | &format_attr_qor.attr, |
2396 | &format_attr_edge.attr, |
2397 | &format_attr_inv.attr, |
2398 | &format_attr_thresh8.attr, |
2399 | NULL, |
2400 | }; |
2401 | |
2402 | static const struct attribute_group knl_uncore_irp_format_group = { |
2403 | .name = "format" , |
2404 | .attrs = knl_uncore_irp_formats_attr, |
2405 | }; |
2406 | |
2407 | static struct intel_uncore_type knl_uncore_irp = { |
2408 | .name = "irp" , |
2409 | .num_counters = 2, |
2410 | .num_boxes = 1, |
2411 | .perf_ctr_bits = 48, |
2412 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
2413 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
2414 | .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK, |
2415 | .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL, |
2416 | .ops = &snbep_uncore_pci_ops, |
2417 | .format_group = &knl_uncore_irp_format_group, |
2418 | }; |
2419 | |
2420 | enum { |
2421 | KNL_PCI_UNCORE_MC_UCLK, |
2422 | KNL_PCI_UNCORE_MC_DCLK, |
2423 | KNL_PCI_UNCORE_EDC_UCLK, |
2424 | KNL_PCI_UNCORE_EDC_ECLK, |
2425 | KNL_PCI_UNCORE_M2PCIE, |
2426 | KNL_PCI_UNCORE_IRP, |
2427 | }; |
2428 | |
2429 | static struct intel_uncore_type *knl_pci_uncores[] = { |
2430 | [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk, |
2431 | [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk, |
2432 | [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk, |
2433 | [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk, |
2434 | [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie, |
2435 | [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp, |
2436 | NULL, |
2437 | }; |
2438 | |
2439 | /* |
2440 | * KNL uses a common PCI device ID for multiple instances of an Uncore PMU |
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2442 | * device ID. |
2443 | * |
2444 | * PCI Device ID Uncore PMU Devices |
2445 | * ---------------------------------- |
2446 | * 0x7841 MC0 UClk, MC1 UClk |
2447 | * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2, |
2448 | * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2 |
2449 | * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk, |
2450 | * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk |
2451 | * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk, |
2452 | * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk |
2453 | * 0x7817 M2PCIe |
2454 | * 0x7814 IRP |
2455 | */ |
2456 | |
2457 | static const struct pci_device_id knl_uncore_pci_ids[] = { |
2458 | { /* MC0 UClk */ |
2459 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841), |
2460 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0), |
2461 | }, |
2462 | { /* MC1 UClk */ |
2463 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841), |
2464 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1), |
2465 | }, |
2466 | { /* MC0 DClk CH 0 */ |
2467 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), |
2468 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0), |
2469 | }, |
2470 | { /* MC0 DClk CH 1 */ |
2471 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), |
2472 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1), |
2473 | }, |
2474 | { /* MC0 DClk CH 2 */ |
2475 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), |
2476 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2), |
2477 | }, |
2478 | { /* MC1 DClk CH 0 */ |
2479 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), |
2480 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3), |
2481 | }, |
2482 | { /* MC1 DClk CH 1 */ |
2483 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), |
2484 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4), |
2485 | }, |
2486 | { /* MC1 DClk CH 2 */ |
2487 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), |
2488 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5), |
2489 | }, |
2490 | { /* EDC0 UClk */ |
2491 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2492 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0), |
2493 | }, |
2494 | { /* EDC1 UClk */ |
2495 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2496 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1), |
2497 | }, |
2498 | { /* EDC2 UClk */ |
2499 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2500 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2), |
2501 | }, |
2502 | { /* EDC3 UClk */ |
2503 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2504 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3), |
2505 | }, |
2506 | { /* EDC4 UClk */ |
2507 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2508 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4), |
2509 | }, |
2510 | { /* EDC5 UClk */ |
2511 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2512 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5), |
2513 | }, |
2514 | { /* EDC6 UClk */ |
2515 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2516 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6), |
2517 | }, |
2518 | { /* EDC7 UClk */ |
2519 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), |
2520 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7), |
2521 | }, |
2522 | { /* EDC0 EClk */ |
2523 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2524 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0), |
2525 | }, |
2526 | { /* EDC1 EClk */ |
2527 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2528 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1), |
2529 | }, |
2530 | { /* EDC2 EClk */ |
2531 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2532 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2), |
2533 | }, |
2534 | { /* EDC3 EClk */ |
2535 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2536 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3), |
2537 | }, |
2538 | { /* EDC4 EClk */ |
2539 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2540 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4), |
2541 | }, |
2542 | { /* EDC5 EClk */ |
2543 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2544 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5), |
2545 | }, |
2546 | { /* EDC6 EClk */ |
2547 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2548 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6), |
2549 | }, |
2550 | { /* EDC7 EClk */ |
2551 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), |
2552 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7), |
2553 | }, |
2554 | { /* M2PCIe */ |
2555 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817), |
2556 | .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0), |
2557 | }, |
2558 | { /* IRP */ |
2559 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814), |
2560 | .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0), |
2561 | }, |
2562 | { /* end: all zeroes */ } |
2563 | }; |
2564 | |
2565 | static struct pci_driver knl_uncore_pci_driver = { |
2566 | .name = "knl_uncore" , |
2567 | .id_table = knl_uncore_pci_ids, |
2568 | }; |
2569 | |
2570 | int knl_uncore_pci_init(void) |
2571 | { |
2572 | int ret; |
2573 | |
2574 | /* All KNL PCI based PMON units are on the same PCI bus except IRP */ |
	ret = snb_pci2phy_map_init(0x7814);	/* IRP */
2576 | if (ret) |
2577 | return ret; |
	ret = snb_pci2phy_map_init(0x7817);	/* M2PCIe */
2579 | if (ret) |
2580 | return ret; |
2581 | uncore_pci_uncores = knl_pci_uncores; |
2582 | uncore_pci_driver = &knl_uncore_pci_driver; |
2583 | return 0; |
2584 | } |
2585 | |
2586 | /* end of KNL uncore support */ |
2587 | |
2588 | /* Haswell-EP uncore support */ |
2589 | static struct attribute *hswep_uncore_ubox_formats_attr[] = { |
2590 | &format_attr_event.attr, |
2591 | &format_attr_umask.attr, |
2592 | &format_attr_edge.attr, |
2593 | &format_attr_inv.attr, |
2594 | &format_attr_thresh5.attr, |
2595 | &format_attr_filter_tid2.attr, |
2596 | &format_attr_filter_cid.attr, |
2597 | NULL, |
2598 | }; |
2599 | |
2600 | static const struct attribute_group hswep_uncore_ubox_format_group = { |
2601 | .name = "format" , |
2602 | .attrs = hswep_uncore_ubox_formats_attr, |
2603 | }; |
2604 | |
2605 | static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
2606 | { |
2607 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
2608 | reg1->reg = HSWEP_U_MSR_PMON_FILTER; |
2609 | reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK; |
2610 | reg1->idx = 0; |
2611 | return 0; |
2612 | } |
2613 | |
2614 | static struct intel_uncore_ops hswep_uncore_ubox_ops = { |
2615 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
2616 | .hw_config = hswep_ubox_hw_config, |
2617 | .get_constraint = uncore_get_constraint, |
2618 | .put_constraint = uncore_put_constraint, |
2619 | }; |
2620 | |
2621 | static struct intel_uncore_type hswep_uncore_ubox = { |
2622 | .name = "ubox" , |
2623 | .num_counters = 2, |
2624 | .num_boxes = 1, |
2625 | .perf_ctr_bits = 44, |
2626 | .fixed_ctr_bits = 48, |
2627 | .perf_ctr = HSWEP_U_MSR_PMON_CTR0, |
2628 | .event_ctl = HSWEP_U_MSR_PMON_CTL0, |
2629 | .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, |
2630 | .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, |
2631 | .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, |
2632 | .num_shared_regs = 1, |
2633 | .ops = &hswep_uncore_ubox_ops, |
2634 | .format_group = &hswep_uncore_ubox_format_group, |
2635 | }; |
2636 | |
2637 | static struct attribute *hswep_uncore_cbox_formats_attr[] = { |
2638 | &format_attr_event.attr, |
2639 | &format_attr_umask.attr, |
2640 | &format_attr_edge.attr, |
2641 | &format_attr_tid_en.attr, |
2642 | &format_attr_thresh8.attr, |
2643 | &format_attr_filter_tid3.attr, |
2644 | &format_attr_filter_link2.attr, |
2645 | &format_attr_filter_state3.attr, |
2646 | &format_attr_filter_nid2.attr, |
2647 | &format_attr_filter_opc2.attr, |
2648 | &format_attr_filter_nc.attr, |
2649 | &format_attr_filter_c6.attr, |
2650 | &format_attr_filter_isoc.attr, |
2651 | NULL, |
2652 | }; |
2653 | |
2654 | static const struct attribute_group hswep_uncore_cbox_format_group = { |
2655 | .name = "format" , |
2656 | .attrs = hswep_uncore_cbox_formats_attr, |
2657 | }; |
2658 | |
2659 | static struct event_constraint hswep_uncore_cbox_constraints[] = { |
2660 | UNCORE_EVENT_CONSTRAINT(0x01, 0x1), |
2661 | UNCORE_EVENT_CONSTRAINT(0x09, 0x1), |
2662 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), |
2663 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), |
2664 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), |
2665 | UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), |
2666 | UNCORE_EVENT_CONSTRAINT(0x3e, 0x1), |
2667 | EVENT_CONSTRAINT_END |
2668 | }; |
2669 | |
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2671 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, |
2672 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), |
2673 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), |
2674 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), |
2675 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), |
2676 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), |
2677 | SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4), |
2678 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4), |
2679 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), |
2680 | SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8), |
2681 | SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8), |
2682 | SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8), |
2683 | SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8), |
2684 | SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8), |
2685 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12), |
2686 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), |
2687 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), |
2688 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), |
2689 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), |
2690 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), |
2691 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), |
2692 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), |
2693 | SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), |
2694 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), |
2695 | SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), |
2696 | SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), |
2697 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), |
2698 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), |
2699 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), |
2700 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), |
2701 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), |
2702 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), |
2703 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), |
2704 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), |
2705 | SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), |
2706 | SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), |
2707 | SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), |
2708 | SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), |
2709 | EVENT_EXTRA_END |
2710 | }; |
2711 | |
2712 | static u64 hswep_cbox_filter_mask(int fields) |
2713 | { |
2714 | u64 mask = 0; |
2715 | if (fields & 0x1) |
2716 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID; |
2717 | if (fields & 0x2) |
2718 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK; |
2719 | if (fields & 0x4) |
2720 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE; |
2721 | if (fields & 0x8) |
2722 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID; |
2723 | if (fields & 0x10) { |
2724 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC; |
2725 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC; |
2726 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6; |
2727 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC; |
2728 | } |
2729 | return mask; |
2730 | } |
2731 | |
2732 | static struct event_constraint * |
2733 | hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) |
2734 | { |
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2736 | } |
2737 | |
2738 | static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
2739 | { |
2740 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
2741 | struct extra_reg *er; |
2742 | int idx = 0; |
2743 | |
2744 | for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) { |
2745 | if (er->event != (event->hw.config & er->config_mask)) |
2746 | continue; |
2747 | idx |= er->idx; |
2748 | } |
2749 | |
2750 | if (idx) { |
2751 | reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + |
2752 | HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; |
2753 | reg1->config = event->attr.config1 & hswep_cbox_filter_mask(fields: idx); |
2754 | reg1->idx = idx; |
2755 | } |
2756 | return 0; |
2757 | } |
2758 | |
2759 | static void hswep_cbox_enable_event(struct intel_uncore_box *box, |
2760 | struct perf_event *event) |
2761 | { |
2762 | struct hw_perf_event *hwc = &event->hw; |
2763 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
2764 | |
2765 | if (reg1->idx != EXTRA_REG_NONE) { |
		u64 filter = uncore_shared_reg_config(box, 0);
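		/*
		 * The 64-bit filter value spans two adjacent filter
		 * registers, hence the split 32-bit writes below.
		 */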
2767 | wrmsrl(msr: reg1->reg, val: filter & 0xffffffff); |
2768 | wrmsrl(msr: reg1->reg + 1, val: filter >> 32); |
2769 | } |
2770 | |
2771 | wrmsrl(msr: hwc->config_base, val: hwc->config | SNBEP_PMON_CTL_EN); |
2772 | } |
2773 | |
2774 | static struct intel_uncore_ops hswep_uncore_cbox_ops = { |
2775 | .init_box = snbep_uncore_msr_init_box, |
2776 | .disable_box = snbep_uncore_msr_disable_box, |
2777 | .enable_box = snbep_uncore_msr_enable_box, |
2778 | .disable_event = snbep_uncore_msr_disable_event, |
2779 | .enable_event = hswep_cbox_enable_event, |
2780 | .read_counter = uncore_msr_read_counter, |
2781 | .hw_config = hswep_cbox_hw_config, |
2782 | .get_constraint = hswep_cbox_get_constraint, |
2783 | .put_constraint = snbep_cbox_put_constraint, |
2784 | }; |
2785 | |
2786 | static struct intel_uncore_type hswep_uncore_cbox = { |
2787 | .name = "cbox" , |
2788 | .num_counters = 4, |
2789 | .num_boxes = 18, |
2790 | .perf_ctr_bits = 48, |
2791 | .event_ctl = HSWEP_C0_MSR_PMON_CTL0, |
2792 | .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, |
2793 | .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, |
2794 | .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, |
2795 | .msr_offset = HSWEP_CBO_MSR_OFFSET, |
2796 | .num_shared_regs = 1, |
2797 | .constraints = hswep_uncore_cbox_constraints, |
2798 | .ops = &hswep_uncore_cbox_ops, |
2799 | .format_group = &hswep_uncore_cbox_format_group, |
2800 | }; |
2801 | |
2802 | /* |
2803 | * Write SBOX Initialization register bit by bit to avoid spurious #GPs |
2804 | */ |
2805 | static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box) |
2806 | { |
2807 | unsigned msr = uncore_msr_box_ctl(box); |
2808 | |
2809 | if (msr) { |
2810 | u64 init = SNBEP_PMON_BOX_CTL_INT; |
2811 | u64 flags = 0; |
2812 | int i; |
2813 | |
2814 | for_each_set_bit(i, (unsigned long *)&init, 64) { |
2815 | flags |= (1ULL << i); |
			wrmsrl(msr, flags);
2817 | } |
2818 | } |
2819 | } |
2820 | |
2821 | static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = { |
2822 | __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
	.init_box = hswep_uncore_sbox_msr_init_box,
2824 | }; |
2825 | |
2826 | static struct attribute *hswep_uncore_sbox_formats_attr[] = { |
2827 | &format_attr_event.attr, |
2828 | &format_attr_umask.attr, |
2829 | &format_attr_edge.attr, |
2830 | &format_attr_tid_en.attr, |
2831 | &format_attr_inv.attr, |
2832 | &format_attr_thresh8.attr, |
2833 | NULL, |
2834 | }; |
2835 | |
2836 | static const struct attribute_group hswep_uncore_sbox_format_group = { |
2837 | .name = "format" , |
2838 | .attrs = hswep_uncore_sbox_formats_attr, |
2839 | }; |
2840 | |
2841 | static struct intel_uncore_type hswep_uncore_sbox = { |
2842 | .name = "sbox" , |
2843 | .num_counters = 4, |
2844 | .num_boxes = 4, |
2845 | .perf_ctr_bits = 44, |
2846 | .event_ctl = HSWEP_S0_MSR_PMON_CTL0, |
2847 | .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, |
2848 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, |
2849 | .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, |
2850 | .msr_offset = HSWEP_SBOX_MSR_OFFSET, |
2851 | .ops = &hswep_uncore_sbox_msr_ops, |
2852 | .format_group = &hswep_uncore_sbox_format_group, |
2853 | }; |
2854 | |
2855 | static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
2856 | { |
2857 | struct hw_perf_event *hwc = &event->hw; |
2858 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
2859 | int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; |
2860 | |
2861 | if (ev_sel >= 0xb && ev_sel <= 0xe) { |
2862 | reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER; |
2863 | reg1->idx = ev_sel - 0xb; |
2864 | reg1->config = event->attr.config1 & (0xff << reg1->idx); |
2865 | } |
2866 | return 0; |
2867 | } |
2868 | |
2869 | static struct intel_uncore_ops hswep_uncore_pcu_ops = { |
2870 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
2871 | .hw_config = hswep_pcu_hw_config, |
2872 | .get_constraint = snbep_pcu_get_constraint, |
2873 | .put_constraint = snbep_pcu_put_constraint, |
2874 | }; |
2875 | |
2876 | static struct intel_uncore_type hswep_uncore_pcu = { |
2877 | .name = "pcu" , |
2878 | .num_counters = 4, |
2879 | .num_boxes = 1, |
2880 | .perf_ctr_bits = 48, |
2881 | .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, |
2882 | .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, |
2883 | .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, |
2884 | .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, |
2885 | .num_shared_regs = 1, |
2886 | .ops = &hswep_uncore_pcu_ops, |
2887 | .format_group = &snbep_uncore_pcu_format_group, |
2888 | }; |
2889 | |
2890 | static struct intel_uncore_type *hswep_msr_uncores[] = { |
2891 | &hswep_uncore_ubox, |
2892 | &hswep_uncore_cbox, |
2893 | &hswep_uncore_sbox, |
2894 | &hswep_uncore_pcu, |
2895 | NULL, |
2896 | }; |
2897 | |
2898 | #define HSWEP_PCU_DID 0x2fc0 |
2899 | #define HSWEP_PCU_CAPID4_OFFET 0x94 |
2900 | #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3) |
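/* A chop value of 0 identifies the 6-8 core parts with only two SBOXes. */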
2901 | |
2902 | static bool hswep_has_limit_sbox(unsigned int device) |
2903 | { |
2904 | struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); |
2905 | u32 capid4; |
2906 | |
2907 | if (!dev) |
2908 | return false; |
2909 | |
	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2911 | pci_dev_put(dev); |
2912 | if (!hswep_get_chop(capid4)) |
2913 | return true; |
2914 | |
2915 | return false; |
2916 | } |
2917 | |
2918 | void hswep_uncore_cpu_init(void) |
2919 | { |
2920 | if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package()) |
2921 | hswep_uncore_cbox.num_boxes = topology_num_cores_per_package(); |
2922 | |
2923 | /* Detect 6-8 core systems with only two SBOXes */ |
2924 | if (hswep_has_limit_sbox(HSWEP_PCU_DID)) |
2925 | hswep_uncore_sbox.num_boxes = 2; |
2926 | |
2927 | uncore_msr_uncores = hswep_msr_uncores; |
2928 | } |
2929 | |
2930 | static struct intel_uncore_type hswep_uncore_ha = { |
2931 | .name = "ha" , |
2932 | .num_counters = 4, |
2933 | .num_boxes = 2, |
2934 | .perf_ctr_bits = 48, |
2935 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
2936 | }; |
2937 | |
2938 | static struct uncore_event_desc hswep_uncore_imc_events[] = { |
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2946 | { /* end: all zeroes */ }, |
2947 | }; |
2948 | |
2949 | static struct intel_uncore_type hswep_uncore_imc = { |
2950 | .name = "imc" , |
2951 | .num_counters = 4, |
2952 | .num_boxes = 8, |
2953 | .perf_ctr_bits = 48, |
2954 | .fixed_ctr_bits = 48, |
2955 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, |
2956 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, |
2957 | .event_descs = hswep_uncore_imc_events, |
2958 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
2959 | }; |
2960 | |
2961 | static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8}; |
2962 | |
2963 | static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
2964 | { |
2965 | struct pci_dev *pdev = box->pci_dev; |
2966 | struct hw_perf_event *hwc = &event->hw; |
2967 | u64 count = 0; |
2968 | |
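	/*
	 * The 48-bit IRP counters are only reachable through PCI config
	 * space, so assemble the value from two 32-bit reads.
	 */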
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2971 | |
2972 | return count; |
2973 | } |
2974 | |
2975 | static struct intel_uncore_ops hswep_uncore_irp_ops = { |
2976 | .init_box = snbep_uncore_pci_init_box, |
2977 | .disable_box = snbep_uncore_pci_disable_box, |
2978 | .enable_box = snbep_uncore_pci_enable_box, |
2979 | .disable_event = ivbep_uncore_irp_disable_event, |
2980 | .enable_event = ivbep_uncore_irp_enable_event, |
2981 | .read_counter = hswep_uncore_irp_read_counter, |
2982 | }; |
2983 | |
2984 | static struct intel_uncore_type hswep_uncore_irp = { |
2985 | .name = "irp" , |
2986 | .num_counters = 4, |
2987 | .num_boxes = 1, |
2988 | .perf_ctr_bits = 48, |
2989 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
2990 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
2991 | .ops = &hswep_uncore_irp_ops, |
2992 | .format_group = &snbep_uncore_format_group, |
2993 | }; |
2994 | |
2995 | static struct intel_uncore_type hswep_uncore_qpi = { |
2996 | .name = "qpi" , |
2997 | .num_counters = 4, |
2998 | .num_boxes = 3, |
2999 | .perf_ctr_bits = 48, |
3000 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
3001 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
3002 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, |
3003 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
3004 | .num_shared_regs = 1, |
3005 | .ops = &snbep_uncore_qpi_ops, |
3006 | .format_group = &snbep_uncore_qpi_format_group, |
3007 | }; |
3008 | |
3009 | static struct event_constraint hswep_uncore_r2pcie_constraints[] = { |
3010 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), |
3011 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), |
3012 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), |
3013 | UNCORE_EVENT_CONSTRAINT(0x23, 0x1), |
3014 | UNCORE_EVENT_CONSTRAINT(0x24, 0x1), |
3015 | UNCORE_EVENT_CONSTRAINT(0x25, 0x1), |
3016 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), |
3017 | UNCORE_EVENT_CONSTRAINT(0x27, 0x1), |
3018 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), |
3019 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), |
3020 | UNCORE_EVENT_CONSTRAINT(0x2a, 0x1), |
3021 | UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), |
3022 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), |
3023 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), |
3024 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), |
3025 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), |
3026 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), |
3027 | UNCORE_EVENT_CONSTRAINT(0x35, 0x3), |
3028 | EVENT_CONSTRAINT_END |
3029 | }; |
3030 | |
3031 | static struct intel_uncore_type hswep_uncore_r2pcie = { |
3032 | .name = "r2pcie" , |
3033 | .num_counters = 4, |
3034 | .num_boxes = 1, |
3035 | .perf_ctr_bits = 48, |
3036 | .constraints = hswep_uncore_r2pcie_constraints, |
3037 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
3038 | }; |
3039 | |
3040 | static struct event_constraint hswep_uncore_r3qpi_constraints[] = { |
3041 | UNCORE_EVENT_CONSTRAINT(0x01, 0x3), |
3042 | UNCORE_EVENT_CONSTRAINT(0x07, 0x7), |
3043 | UNCORE_EVENT_CONSTRAINT(0x08, 0x7), |
3044 | UNCORE_EVENT_CONSTRAINT(0x09, 0x7), |
3045 | UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), |
3046 | UNCORE_EVENT_CONSTRAINT(0x0e, 0x7), |
3047 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), |
3048 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), |
3049 | UNCORE_EVENT_CONSTRAINT(0x12, 0x3), |
3050 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), |
3051 | UNCORE_EVENT_CONSTRAINT(0x14, 0x3), |
3052 | UNCORE_EVENT_CONSTRAINT(0x15, 0x3), |
3053 | UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), |
3054 | UNCORE_EVENT_CONSTRAINT(0x20, 0x3), |
3055 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), |
3056 | UNCORE_EVENT_CONSTRAINT(0x22, 0x3), |
3057 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), |
3058 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), |
3059 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), |
3060 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), |
3061 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), |
3062 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), |
3063 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), |
3064 | UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), |
3065 | UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), |
3066 | UNCORE_EVENT_CONSTRAINT(0x31, 0x3), |
3067 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), |
3068 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), |
3069 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), |
3070 | UNCORE_EVENT_CONSTRAINT(0x36, 0x3), |
3071 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), |
3072 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), |
3073 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), |
3074 | EVENT_CONSTRAINT_END |
3075 | }; |
3076 | |
3077 | static struct intel_uncore_type hswep_uncore_r3qpi = { |
3078 | .name = "r3qpi" , |
3079 | .num_counters = 3, |
3080 | .num_boxes = 3, |
3081 | .perf_ctr_bits = 44, |
3082 | .constraints = hswep_uncore_r3qpi_constraints, |
3083 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
3084 | }; |
3085 | |
3086 | enum { |
3087 | HSWEP_PCI_UNCORE_HA, |
3088 | HSWEP_PCI_UNCORE_IMC, |
3089 | HSWEP_PCI_UNCORE_IRP, |
3090 | HSWEP_PCI_UNCORE_QPI, |
3091 | HSWEP_PCI_UNCORE_R2PCIE, |
3092 | HSWEP_PCI_UNCORE_R3QPI, |
3093 | }; |
3094 | |
3095 | static struct intel_uncore_type *hswep_pci_uncores[] = { |
3096 | [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha, |
3097 | [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc, |
3098 | [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp, |
3099 | [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi, |
3100 | [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie, |
3101 | [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi, |
3102 | NULL, |
3103 | }; |
3104 | |
3105 | static const struct pci_device_id hswep_uncore_pci_ids[] = { |
3106 | { /* Home Agent 0 */ |
3107 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30), |
3108 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0), |
3109 | }, |
3110 | { /* Home Agent 1 */ |
3111 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38), |
3112 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1), |
3113 | }, |
3114 | { /* MC0 Channel 0 */ |
3115 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0), |
3116 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0), |
3117 | }, |
3118 | { /* MC0 Channel 1 */ |
3119 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1), |
3120 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1), |
3121 | }, |
3122 | { /* MC0 Channel 2 */ |
3123 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4), |
3124 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2), |
3125 | }, |
3126 | { /* MC0 Channel 3 */ |
3127 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5), |
3128 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3), |
3129 | }, |
3130 | { /* MC1 Channel 0 */ |
3131 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0), |
3132 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4), |
3133 | }, |
3134 | { /* MC1 Channel 1 */ |
3135 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1), |
3136 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5), |
3137 | }, |
3138 | { /* MC1 Channel 2 */ |
3139 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4), |
3140 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6), |
3141 | }, |
3142 | { /* MC1 Channel 3 */ |
3143 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5), |
3144 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7), |
3145 | }, |
3146 | { /* IRP */ |
3147 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39), |
3148 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0), |
3149 | }, |
3150 | { /* QPI0 Port 0 */ |
3151 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32), |
3152 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0), |
3153 | }, |
3154 | { /* QPI0 Port 1 */ |
3155 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33), |
3156 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1), |
3157 | }, |
3158 | { /* QPI1 Port 2 */ |
3159 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a), |
3160 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2), |
3161 | }, |
3162 | { /* R2PCIe */ |
3163 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34), |
3164 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0), |
3165 | }, |
3166 | { /* R3QPI0 Link 0 */ |
3167 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36), |
3168 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0), |
3169 | }, |
3170 | { /* R3QPI0 Link 1 */ |
3171 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37), |
3172 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1), |
3173 | }, |
3174 | { /* R3QPI1 Link 2 */ |
3175 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e), |
3176 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2), |
3177 | }, |
3178 | { /* QPI Port 0 filter */ |
3179 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86), |
3180 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
3181 | SNBEP_PCI_QPI_PORT0_FILTER), |
3182 | }, |
3183 | { /* QPI Port 1 filter */ |
3184 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96), |
3185 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
3186 | SNBEP_PCI_QPI_PORT1_FILTER), |
3187 | }, |
3188 | { /* end: all zeroes */ } |
3189 | }; |
3190 | |
3191 | static struct pci_driver hswep_uncore_pci_driver = { |
3192 | .name = "hswep_uncore" , |
3193 | .id_table = hswep_uncore_pci_ids, |
3194 | }; |
3195 | |
3196 | int hswep_uncore_pci_init(void) |
3197 | { |
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3199 | if (ret) |
3200 | return ret; |
3201 | uncore_pci_uncores = hswep_pci_uncores; |
3202 | uncore_pci_driver = &hswep_uncore_pci_driver; |
3203 | return 0; |
3204 | } |
3205 | /* end of Haswell-EP uncore support */ |
3206 | |
3207 | /* BDX uncore support */ |
3208 | |
3209 | static struct intel_uncore_type bdx_uncore_ubox = { |
3210 | .name = "ubox" , |
3211 | .num_counters = 2, |
3212 | .num_boxes = 1, |
3213 | .perf_ctr_bits = 48, |
3214 | .fixed_ctr_bits = 48, |
3215 | .perf_ctr = HSWEP_U_MSR_PMON_CTR0, |
3216 | .event_ctl = HSWEP_U_MSR_PMON_CTL0, |
3217 | .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, |
3218 | .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, |
3219 | .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, |
3220 | .num_shared_regs = 1, |
3221 | .ops = &ivbep_uncore_msr_ops, |
3222 | .format_group = &ivbep_uncore_ubox_format_group, |
3223 | }; |
3224 | |
3225 | static struct event_constraint bdx_uncore_cbox_constraints[] = { |
3226 | UNCORE_EVENT_CONSTRAINT(0x09, 0x3), |
3227 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), |
3228 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), |
3229 | UNCORE_EVENT_CONSTRAINT(0x3e, 0x1), |
3230 | EVENT_CONSTRAINT_END |
3231 | }; |
3232 | |
3233 | static struct intel_uncore_type bdx_uncore_cbox = { |
3234 | .name = "cbox" , |
3235 | .num_counters = 4, |
3236 | .num_boxes = 24, |
3237 | .perf_ctr_bits = 48, |
3238 | .event_ctl = HSWEP_C0_MSR_PMON_CTL0, |
3239 | .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, |
3240 | .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, |
3241 | .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, |
3242 | .msr_offset = HSWEP_CBO_MSR_OFFSET, |
3243 | .num_shared_regs = 1, |
3244 | .constraints = bdx_uncore_cbox_constraints, |
3245 | .ops = &hswep_uncore_cbox_ops, |
3246 | .format_group = &hswep_uncore_cbox_format_group, |
3247 | }; |
3248 | |
3249 | static struct intel_uncore_type bdx_uncore_sbox = { |
3250 | .name = "sbox" , |
3251 | .num_counters = 4, |
3252 | .num_boxes = 4, |
3253 | .perf_ctr_bits = 48, |
3254 | .event_ctl = HSWEP_S0_MSR_PMON_CTL0, |
3255 | .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, |
3256 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, |
3257 | .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, |
3258 | .msr_offset = HSWEP_SBOX_MSR_OFFSET, |
3259 | .ops = &hswep_uncore_sbox_msr_ops, |
3260 | .format_group = &hswep_uncore_sbox_format_group, |
3261 | }; |
3262 | |
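/* Index of bdx_uncore_sbox in bdx_msr_uncores[]; cleared below when no SBOXes exist. */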
3263 | #define BDX_MSR_UNCORE_SBOX 3 |
3264 | |
3265 | static struct intel_uncore_type *bdx_msr_uncores[] = { |
3266 | &bdx_uncore_ubox, |
3267 | &bdx_uncore_cbox, |
3268 | &hswep_uncore_pcu, |
3269 | &bdx_uncore_sbox, |
3270 | NULL, |
3271 | }; |
3272 | |
3273 | /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */ |
3274 | static struct event_constraint bdx_uncore_pcu_constraints[] = { |
3275 | EVENT_CONSTRAINT(0x80, 0xe, 0x80), |
3276 | EVENT_CONSTRAINT_END |
3277 | }; |
3278 | |
3279 | #define BDX_PCU_DID 0x6fc0 |
3280 | |
3281 | void bdx_uncore_cpu_init(void) |
3282 | { |
3283 | if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package()) |
3284 | bdx_uncore_cbox.num_boxes = topology_num_cores_per_package(); |
3285 | uncore_msr_uncores = bdx_msr_uncores; |
3286 | |
3287 | /* Detect systems with no SBOXes */ |
3288 | if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID)) |
3289 | uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; |
3290 | |
3291 | hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; |
3292 | } |
3293 | |
3294 | static struct intel_uncore_type bdx_uncore_ha = { |
3295 | .name = "ha" , |
3296 | .num_counters = 4, |
3297 | .num_boxes = 2, |
3298 | .perf_ctr_bits = 48, |
3299 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
3300 | }; |
3301 | |
3302 | static struct intel_uncore_type bdx_uncore_imc = { |
3303 | .name = "imc" , |
3304 | .num_counters = 4, |
3305 | .num_boxes = 8, |
3306 | .perf_ctr_bits = 48, |
3307 | .fixed_ctr_bits = 48, |
3308 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, |
3309 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, |
3310 | .event_descs = hswep_uncore_imc_events, |
3311 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
3312 | }; |
3313 | |
3314 | static struct intel_uncore_type bdx_uncore_irp = { |
3315 | .name = "irp" , |
3316 | .num_counters = 4, |
3317 | .num_boxes = 1, |
3318 | .perf_ctr_bits = 48, |
3319 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
3320 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
3321 | .ops = &hswep_uncore_irp_ops, |
3322 | .format_group = &snbep_uncore_format_group, |
3323 | }; |
3324 | |
3325 | static struct intel_uncore_type bdx_uncore_qpi = { |
3326 | .name = "qpi" , |
3327 | .num_counters = 4, |
3328 | .num_boxes = 3, |
3329 | .perf_ctr_bits = 48, |
3330 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
3331 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
3332 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, |
3333 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
3334 | .num_shared_regs = 1, |
3335 | .ops = &snbep_uncore_qpi_ops, |
3336 | .format_group = &snbep_uncore_qpi_format_group, |
3337 | }; |
3338 | |
3339 | static struct event_constraint bdx_uncore_r2pcie_constraints[] = { |
3340 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), |
3341 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), |
3342 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), |
3343 | UNCORE_EVENT_CONSTRAINT(0x23, 0x1), |
3344 | UNCORE_EVENT_CONSTRAINT(0x25, 0x1), |
3345 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), |
3346 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), |
3347 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), |
3348 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), |
3349 | EVENT_CONSTRAINT_END |
3350 | }; |
3351 | |
3352 | static struct intel_uncore_type bdx_uncore_r2pcie = { |
3353 | .name = "r2pcie" , |
3354 | .num_counters = 4, |
3355 | .num_boxes = 1, |
3356 | .perf_ctr_bits = 48, |
3357 | .constraints = bdx_uncore_r2pcie_constraints, |
3358 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
3359 | }; |
3360 | |
3361 | static struct event_constraint bdx_uncore_r3qpi_constraints[] = { |
3362 | UNCORE_EVENT_CONSTRAINT(0x01, 0x7), |
3363 | UNCORE_EVENT_CONSTRAINT(0x07, 0x7), |
3364 | UNCORE_EVENT_CONSTRAINT(0x08, 0x7), |
3365 | UNCORE_EVENT_CONSTRAINT(0x09, 0x7), |
3366 | UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), |
3367 | UNCORE_EVENT_CONSTRAINT(0x0e, 0x7), |
3368 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), |
3369 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), |
3370 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), |
3371 | UNCORE_EVENT_CONSTRAINT(0x14, 0x3), |
3372 | UNCORE_EVENT_CONSTRAINT(0x15, 0x3), |
3373 | UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), |
3374 | UNCORE_EVENT_CONSTRAINT(0x20, 0x3), |
3375 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), |
3376 | UNCORE_EVENT_CONSTRAINT(0x22, 0x3), |
3377 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), |
3378 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), |
3379 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), |
3380 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), |
3381 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), |
3382 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), |
3383 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), |
3384 | UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), |
3385 | UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), |
3386 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), |
3387 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), |
3388 | UNCORE_EVENT_CONSTRAINT(0x36, 0x3), |
3389 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), |
3390 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), |
3391 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), |
3392 | EVENT_CONSTRAINT_END |
3393 | }; |
3394 | |
3395 | static struct intel_uncore_type bdx_uncore_r3qpi = { |
3396 | .name = "r3qpi" , |
3397 | .num_counters = 3, |
3398 | .num_boxes = 3, |
3399 | .perf_ctr_bits = 48, |
3400 | .constraints = bdx_uncore_r3qpi_constraints, |
3401 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
3402 | }; |
3403 | |
3404 | enum { |
3405 | BDX_PCI_UNCORE_HA, |
3406 | BDX_PCI_UNCORE_IMC, |
3407 | BDX_PCI_UNCORE_IRP, |
3408 | BDX_PCI_UNCORE_QPI, |
3409 | BDX_PCI_UNCORE_R2PCIE, |
3410 | BDX_PCI_UNCORE_R3QPI, |
3411 | }; |
3412 | |
3413 | static struct intel_uncore_type *bdx_pci_uncores[] = { |
3414 | [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha, |
3415 | [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc, |
3416 | [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp, |
3417 | [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi, |
3418 | [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie, |
3419 | [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi, |
3420 | NULL, |
3421 | }; |
3422 | |
3423 | static const struct pci_device_id bdx_uncore_pci_ids[] = { |
3424 | { /* Home Agent 0 */ |
3425 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30), |
3426 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0), |
3427 | }, |
3428 | { /* Home Agent 1 */ |
3429 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38), |
3430 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1), |
3431 | }, |
3432 | { /* MC0 Channel 0 */ |
3433 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0), |
3434 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0), |
3435 | }, |
3436 | { /* MC0 Channel 1 */ |
3437 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1), |
3438 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1), |
3439 | }, |
3440 | { /* MC0 Channel 2 */ |
3441 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4), |
3442 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2), |
3443 | }, |
3444 | { /* MC0 Channel 3 */ |
3445 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5), |
3446 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3), |
3447 | }, |
3448 | { /* MC1 Channel 0 */ |
3449 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0), |
3450 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4), |
3451 | }, |
3452 | { /* MC1 Channel 1 */ |
3453 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1), |
3454 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5), |
3455 | }, |
3456 | { /* MC1 Channel 2 */ |
3457 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4), |
3458 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6), |
3459 | }, |
3460 | { /* MC1 Channel 3 */ |
3461 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5), |
3462 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7), |
3463 | }, |
3464 | { /* IRP */ |
3465 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39), |
3466 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0), |
3467 | }, |
3468 | { /* QPI0 Port 0 */ |
3469 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32), |
3470 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0), |
3471 | }, |
3472 | { /* QPI0 Port 1 */ |
3473 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33), |
3474 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1), |
3475 | }, |
3476 | { /* QPI1 Port 2 */ |
3477 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a), |
3478 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2), |
3479 | }, |
3480 | { /* R2PCIe */ |
3481 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34), |
3482 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0), |
3483 | }, |
3484 | { /* R3QPI0 Link 0 */ |
3485 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36), |
3486 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0), |
3487 | }, |
3488 | { /* R3QPI0 Link 1 */ |
3489 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37), |
3490 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1), |
3491 | }, |
3492 | { /* R3QPI1 Link 2 */ |
3493 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e), |
3494 | .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2), |
3495 | }, |
3496 | { /* QPI Port 0 filter */ |
3497 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86), |
3498 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
3499 | SNBEP_PCI_QPI_PORT0_FILTER), |
3500 | }, |
3501 | { /* QPI Port 1 filter */ |
3502 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96), |
3503 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
3504 | SNBEP_PCI_QPI_PORT1_FILTER), |
3505 | }, |
3506 | { /* QPI Port 2 filter */ |
3507 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), |
3508 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
3509 | BDX_PCI_QPI_PORT2_FILTER), |
3510 | }, |
3511 | { /* end: all zeroes */ } |
3512 | }; |
3513 | |
3514 | static struct pci_driver bdx_uncore_pci_driver = { |
3515 | .name = "bdx_uncore" , |
3516 | .id_table = bdx_uncore_pci_ids, |
3517 | }; |
3518 | |
3519 | int bdx_uncore_pci_init(void) |
3520 | { |
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3522 | |
3523 | if (ret) |
3524 | return ret; |
3525 | uncore_pci_uncores = bdx_pci_uncores; |
3526 | uncore_pci_driver = &bdx_uncore_pci_driver; |
3527 | return 0; |
3528 | } |
3529 | |
3530 | /* end of BDX uncore support */ |
3531 | |
3532 | /* SKX uncore support */ |
3533 | |
3534 | static struct intel_uncore_type skx_uncore_ubox = { |
3535 | .name = "ubox" , |
3536 | .num_counters = 2, |
3537 | .num_boxes = 1, |
3538 | .perf_ctr_bits = 48, |
3539 | .fixed_ctr_bits = 48, |
3540 | .perf_ctr = HSWEP_U_MSR_PMON_CTR0, |
3541 | .event_ctl = HSWEP_U_MSR_PMON_CTL0, |
3542 | .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, |
3543 | .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, |
3544 | .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, |
3545 | .ops = &ivbep_uncore_msr_ops, |
3546 | .format_group = &ivbep_uncore_ubox_format_group, |
3547 | }; |
3548 | |
3549 | static struct attribute *skx_uncore_cha_formats_attr[] = { |
3550 | &format_attr_event.attr, |
3551 | &format_attr_umask.attr, |
3552 | &format_attr_edge.attr, |
3553 | &format_attr_tid_en.attr, |
3554 | &format_attr_inv.attr, |
3555 | &format_attr_thresh8.attr, |
3556 | &format_attr_filter_tid4.attr, |
3557 | &format_attr_filter_state5.attr, |
3558 | &format_attr_filter_rem.attr, |
3559 | &format_attr_filter_loc.attr, |
3560 | &format_attr_filter_nm.attr, |
3561 | &format_attr_filter_all_op.attr, |
3562 | &format_attr_filter_not_nm.attr, |
3563 | &format_attr_filter_opc_0.attr, |
3564 | &format_attr_filter_opc_1.attr, |
3565 | &format_attr_filter_nc.attr, |
3566 | &format_attr_filter_isoc.attr, |
3567 | NULL, |
3568 | }; |
3569 | |
3570 | static const struct attribute_group skx_uncore_chabox_format_group = { |
3571 | .name = "format" , |
3572 | .attrs = skx_uncore_cha_formats_attr, |
3573 | }; |
3574 | |
3575 | static struct event_constraint skx_uncore_chabox_constraints[] = { |
3576 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), |
3577 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), |
3578 | EVENT_CONSTRAINT_END |
3579 | }; |
3580 | |
static struct extra_reg skx_uncore_cha_extra_regs[] = {
3582 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), |
3583 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), |
3584 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), |
3585 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), |
3586 | SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4), |
3587 | SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4), |
3588 | SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8), |
3589 | SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8), |
3590 | SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3), |
3591 | EVENT_EXTRA_END |
3592 | }; |
3593 | |
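/*
 * Convert the filter-field index mask built by skx_cha_hw_config() into
 * the raw bits that may be set in the CHA filter register: bit 0 maps to
 * TID, bit 1 to LINK, bit 2 to STATE, and bit 3 covers all of the
 * opcode/locality related fields at once.
 */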
3594 | static u64 skx_cha_filter_mask(int fields) |
3595 | { |
3596 | u64 mask = 0; |
3597 | |
3598 | if (fields & 0x1) |
3599 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID; |
3600 | if (fields & 0x2) |
3601 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK; |
3602 | if (fields & 0x4) |
3603 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE; |
3604 | if (fields & 0x8) { |
3605 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM; |
3606 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC; |
3607 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC; |
3608 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM; |
3609 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM; |
3610 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0; |
3611 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1; |
3612 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC; |
3613 | mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC; |
3614 | } |
3615 | return mask; |
3616 | } |
3617 | |
3618 | static struct event_constraint * |
3619 | skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) |
3620 | { |
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3622 | } |
3623 | |
3624 | static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
3625 | { |
3626 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
3627 | struct extra_reg *er; |
3628 | int idx = 0; |
	/* Any of the CHA events may be filtered by Thread/Core-ID. */
3630 | if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN) |
3631 | idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID; |
3632 | |
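	/* OR in the filter fields required by any matching extra reg. */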
3633 | for (er = skx_uncore_cha_extra_regs; er->msr; er++) { |
3634 | if (er->event != (event->hw.config & er->config_mask)) |
3635 | continue; |
3636 | idx |= er->idx; |
3637 | } |
3638 | |
3639 | if (idx) { |
3640 | reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + |
3641 | HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; |
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3643 | reg1->idx = idx; |
3644 | } |
3645 | return 0; |
3646 | } |
3647 | |
3648 | static struct intel_uncore_ops skx_uncore_chabox_ops = { |
3649 | /* There is no frz_en for chabox ctl */ |
3650 | .init_box = ivbep_uncore_msr_init_box, |
3651 | .disable_box = snbep_uncore_msr_disable_box, |
3652 | .enable_box = snbep_uncore_msr_enable_box, |
3653 | .disable_event = snbep_uncore_msr_disable_event, |
3654 | .enable_event = hswep_cbox_enable_event, |
3655 | .read_counter = uncore_msr_read_counter, |
3656 | .hw_config = skx_cha_hw_config, |
3657 | .get_constraint = skx_cha_get_constraint, |
3658 | .put_constraint = snbep_cbox_put_constraint, |
3659 | }; |
3660 | |
3661 | static struct intel_uncore_type skx_uncore_chabox = { |
3662 | .name = "cha" , |
3663 | .num_counters = 4, |
3664 | .perf_ctr_bits = 48, |
3665 | .event_ctl = HSWEP_C0_MSR_PMON_CTL0, |
3666 | .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, |
3667 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, |
3668 | .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, |
3669 | .msr_offset = HSWEP_CBO_MSR_OFFSET, |
3670 | .num_shared_regs = 1, |
3671 | .constraints = skx_uncore_chabox_constraints, |
3672 | .ops = &skx_uncore_chabox_ops, |
3673 | .format_group = &skx_uncore_chabox_format_group, |
3674 | }; |
3675 | |
3676 | static struct attribute *skx_uncore_iio_formats_attr[] = { |
3677 | &format_attr_event.attr, |
3678 | &format_attr_umask.attr, |
3679 | &format_attr_edge.attr, |
3680 | &format_attr_inv.attr, |
3681 | &format_attr_thresh9.attr, |
3682 | &format_attr_ch_mask.attr, |
3683 | &format_attr_fc_mask.attr, |
3684 | NULL, |
3685 | }; |
3686 | |
3687 | static const struct attribute_group skx_uncore_iio_format_group = { |
3688 | .name = "format" , |
3689 | .attrs = skx_uncore_iio_formats_attr, |
3690 | }; |
3691 | |
3692 | static struct event_constraint skx_uncore_iio_constraints[] = { |
3693 | UNCORE_EVENT_CONSTRAINT(0x83, 0x3), |
3694 | UNCORE_EVENT_CONSTRAINT(0x88, 0xc), |
3695 | UNCORE_EVENT_CONSTRAINT(0x95, 0xc), |
3696 | UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), |
3697 | UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), |
3698 | UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), |
3699 | UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), |
3700 | EVENT_CONSTRAINT_END |
3701 | }; |
3702 | |
3703 | static void skx_iio_enable_event(struct intel_uncore_box *box, |
3704 | struct perf_event *event) |
3705 | { |
3706 | struct hw_perf_event *hwc = &event->hw; |
3707 | |
	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3709 | } |
3710 | |
3711 | static struct intel_uncore_ops skx_uncore_iio_ops = { |
3712 | .init_box = ivbep_uncore_msr_init_box, |
3713 | .disable_box = snbep_uncore_msr_disable_box, |
3714 | .enable_box = snbep_uncore_msr_enable_box, |
3715 | .disable_event = snbep_uncore_msr_disable_event, |
3716 | .enable_event = skx_iio_enable_event, |
3717 | .read_counter = uncore_msr_read_counter, |
3718 | }; |
3719 | |
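/* Look up this PMU's per-die topology entry by matching pmu_idx. */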
3720 | static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die) |
3721 | { |
3722 | int idx; |
3723 | |
3724 | for (idx = 0; idx < pmu->type->num_boxes; idx++) { |
3725 | if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx) |
3726 | return &pmu->type->topology[die][idx]; |
3727 | } |
3728 | |
3729 | return NULL; |
3730 | } |
3731 | |
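/*
 * Hide the mapping attribute when this PMU reports root bus 0 but is not
 * the single PMU (@zero_bus_pmu) that legitimately owns bus 0.
 */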
3732 | static umode_t |
3733 | pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, |
3734 | int die, int zero_bus_pmu) |
3735 | { |
3736 | struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj)); |
3737 | struct intel_uncore_topology *pmut = pmu_topology(pmu, die); |
3738 | |
3739 | return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode; |
3740 | } |
3741 | |
3742 | static umode_t |
3743 | skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) |
3744 | { |
3745 | /* Root bus 0x00 is valid only for pmu_idx = 0. */ |
	return pmu_iio_mapping_visible(kobj, attr, die, 0);
3747 | } |
3748 | |
3749 | static ssize_t skx_iio_mapping_show(struct device *dev, |
3750 | struct device_attribute *attr, char *buf) |
3751 | { |
3752 | struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); |
3753 | struct dev_ext_attribute *ea = to_dev_ext_attribute(attr); |
3754 | long die = (long)ea->var; |
3755 | struct intel_uncore_topology *pmut = pmu_topology(pmu, die); |
3756 | |
	return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3758 | pmut ? pmut->iio->pci_bus_no : 0); |
3759 | } |
3760 | |
3761 | static int skx_msr_cpu_bus_read(int cpu, u64 *topology) |
3762 | { |
3763 | u64 msr_value; |
3764 | |
	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3766 | !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT)) |
3767 | return -ENXIO; |
3768 | |
3769 | *topology = msr_value; |
3770 | |
3771 | return 0; |
3772 | } |
3773 | |
3774 | static int die_to_cpu(int die) |
3775 | { |
3776 | int res = 0, cpu, current_die; |
3777 | /* |
3778 | * Using cpus_read_lock() to ensure cpu is not going down between |
3779 | * looking at cpu_online_mask. |
3780 | */ |
3781 | cpus_read_lock(); |
3782 | for_each_online_cpu(cpu) { |
3783 | current_die = topology_logical_die_id(cpu); |
3784 | if (current_die == die) { |
3785 | res = cpu; |
3786 | break; |
3787 | } |
3788 | } |
3789 | cpus_read_unlock(); |
3790 | return res; |
3791 | } |
3792 | |
3793 | enum { |
3794 | IIO_TOPOLOGY_TYPE, |
3795 | UPI_TOPOLOGY_TYPE, |
3796 | TOPOLOGY_MAX |
3797 | }; |
3798 | |
3799 | static const size_t topology_size[TOPOLOGY_MAX] = { |
3800 | sizeof(*((struct intel_uncore_topology *)NULL)->iio), |
3801 | sizeof(*((struct intel_uncore_topology *)NULL)->upi) |
3802 | }; |
3803 | |
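/*
 * Allocate the two-dimensional topology array (per die, per box) plus the
 * type-specific IIO/UPI payload hung off the untyped pointer; on failure,
 * unwind whatever has been allocated so far.
 */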
3804 | static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type) |
3805 | { |
3806 | int die, idx; |
3807 | struct intel_uncore_topology **topology; |
3808 | |
3809 | if (!type->num_boxes) |
3810 | return -EPERM; |
3811 | |
	topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3813 | if (!topology) |
3814 | goto err; |
3815 | |
3816 | for (die = 0; die < uncore_max_dies(); die++) { |
		topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3818 | if (!topology[die]) |
3819 | goto clear; |
3820 | for (idx = 0; idx < type->num_boxes; idx++) { |
			topology[die][idx].untyped = kcalloc(type->num_boxes,
							     topology_size[topology_type],
							     GFP_KERNEL);
3824 | if (!topology[die][idx].untyped) |
3825 | goto clear; |
3826 | } |
3827 | } |
3828 | |
3829 | type->topology = topology; |
3830 | |
3831 | return 0; |
3832 | clear: |
3833 | for (; die >= 0; die--) { |
3834 | for (idx = 0; idx < type->num_boxes; idx++) |
			kfree(topology[die][idx].untyped);
		kfree(topology[die]);
	}
	kfree(topology);
3839 | err: |
3840 | return -ENOMEM; |
3841 | } |
3842 | |
3843 | static void pmu_free_topology(struct intel_uncore_type *type) |
3844 | { |
3845 | int die, idx; |
3846 | |
3847 | if (type->topology) { |
3848 | for (die = 0; die < uncore_max_dies(); die++) { |
3849 | for (idx = 0; idx < type->num_boxes; idx++) |
				kfree(type->topology[die][idx].untyped);
			kfree(type->topology[die]);
		}
		kfree(type->topology);
3854 | type->topology = NULL; |
3855 | } |
3856 | } |
3857 | |
3858 | static int skx_pmu_get_topology(struct intel_uncore_type *type, |
3859 | int (*topology_cb)(struct intel_uncore_type*, int, int, u64)) |
3860 | { |
3861 | int die, ret = -EPERM; |
3862 | u64 cpu_bus_msr; |
3863 | |
3864 | for (die = 0; die < uncore_max_dies(); die++) { |
		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
3866 | if (ret) |
3867 | break; |
3868 | |
3869 | ret = uncore_die_to_segment(die); |
3870 | if (ret < 0) |
3871 | break; |
3872 | |
3873 | ret = topology_cb(type, ret, die, cpu_bus_msr); |
3874 | if (ret) |
3875 | break; |
3876 | } |
3877 | |
3878 | return ret; |
3879 | } |
3880 | |
3881 | static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment, |
3882 | int die, u64 cpu_bus_msr) |
3883 | { |
3884 | int idx; |
3885 | struct intel_uncore_topology *t; |
3886 | |
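	/* Each IIO stack's root bus number occupies one byte of the MSR. */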
3887 | for (idx = 0; idx < type->num_boxes; idx++) { |
3888 | t = &type->topology[die][idx]; |
3889 | t->pmu_idx = idx; |
3890 | t->iio->segment = segment; |
3891 | t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff; |
3892 | } |
3893 | |
3894 | return 0; |
3895 | } |
3896 | |
3897 | static int skx_iio_get_topology(struct intel_uncore_type *type) |
3898 | { |
	return skx_pmu_get_topology(type, skx_iio_topology_cb);
3900 | } |
3901 | |
3902 | static struct attribute_group skx_iio_mapping_group = { |
3903 | .is_visible = skx_iio_mapping_visible, |
3904 | }; |
3905 | |
3906 | static const struct attribute_group *skx_iio_attr_update[] = { |
3907 | &skx_iio_mapping_group, |
3908 | NULL, |
3909 | }; |
3910 | |
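/* Drop @ag from the NULL-terminated @groups list by shifting the tail down. */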
3911 | static void pmu_clear_mapping_attr(const struct attribute_group **groups, |
3912 | struct attribute_group *ag) |
3913 | { |
3914 | int i; |
3915 | |
3916 | for (i = 0; groups[i]; i++) { |
3917 | if (groups[i] == ag) { |
3918 | for (i++; groups[i]; i++) |
3919 | groups[i - 1] = groups[i]; |
3920 | groups[i - 1] = NULL; |
3921 | break; |
3922 | } |
3923 | } |
3924 | } |
3925 | |
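/*
 * Build the per-die "dieX" sysfs attributes for @ag. On any failure the
 * mapping group is simply removed from the type's attr_update list, so
 * the PMU still registers, just without the mapping attributes.
 */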
3926 | static void |
3927 | pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag, |
3928 | ssize_t (*show)(struct device*, struct device_attribute*, char*), |
3929 | int topology_type) |
3930 | { |
3931 | char buf[64]; |
3932 | int ret; |
3933 | long die = -1; |
3934 | struct attribute **attrs = NULL; |
3935 | struct dev_ext_attribute *eas = NULL; |
3936 | |
3937 | ret = pmu_alloc_topology(type, topology_type); |
3938 | if (ret < 0) |
3939 | goto clear_attr_update; |
3940 | |
3941 | ret = type->get_topology(type); |
3942 | if (ret < 0) |
3943 | goto clear_topology; |
3944 | |
3945 | /* One more for NULL. */ |
	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3947 | if (!attrs) |
3948 | goto clear_topology; |
3949 | |
	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3951 | if (!eas) |
3952 | goto clear_attrs; |
3953 | |
3954 | for (die = 0; die < uncore_max_dies(); die++) { |
		snprintf(buf, sizeof(buf), "die%ld", die);
		sysfs_attr_init(&eas[die].attr.attr);
		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3958 | if (!eas[die].attr.attr.name) |
3959 | goto err; |
3960 | eas[die].attr.attr.mode = 0444; |
3961 | eas[die].attr.show = show; |
3962 | eas[die].attr.store = NULL; |
3963 | eas[die].var = (void *)die; |
3964 | attrs[die] = &eas[die].attr.attr; |
3965 | } |
3966 | ag->attrs = attrs; |
3967 | |
3968 | return; |
3969 | err: |
3970 | for (; die >= 0; die--) |
		kfree(eas[die].attr.attr.name);
	kfree(eas);
clear_attrs:
	kfree(attrs);
clear_topology:
	pmu_free_topology(type);
clear_attr_update:
	pmu_clear_mapping_attr(type->attr_update, ag);
3979 | } |
3980 | |
3981 | static void |
3982 | pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag) |
3983 | { |
3984 | struct attribute **attr = ag->attrs; |
3985 | |
3986 | if (!attr) |
3987 | return; |
3988 | |
3989 | for (; *attr; attr++) |
		kfree((*attr)->name);
	kfree(attr_to_ext_attr(*ag->attrs));
	kfree(ag->attrs);
3993 | ag->attrs = NULL; |
3994 | pmu_free_topology(type); |
3995 | } |
3996 | |
3997 | static void |
3998 | pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) |
3999 | { |
	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
4001 | } |
4002 | |
4003 | static void skx_iio_set_mapping(struct intel_uncore_type *type) |
4004 | { |
	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
4006 | } |
4007 | |
4008 | static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) |
4009 | { |
	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
4011 | } |
4012 | |
4013 | static struct intel_uncore_type skx_uncore_iio = { |
4014 | .name = "iio" , |
4015 | .num_counters = 4, |
4016 | .num_boxes = 6, |
4017 | .perf_ctr_bits = 48, |
4018 | .event_ctl = SKX_IIO0_MSR_PMON_CTL0, |
4019 | .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, |
4020 | .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, |
4021 | .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, |
4022 | .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL, |
4023 | .msr_offset = SKX_IIO_MSR_OFFSET, |
4024 | .constraints = skx_uncore_iio_constraints, |
4025 | .ops = &skx_uncore_iio_ops, |
4026 | .format_group = &skx_uncore_iio_format_group, |
4027 | .attr_update = skx_iio_attr_update, |
4028 | .get_topology = skx_iio_get_topology, |
4029 | .set_mapping = skx_iio_set_mapping, |
4030 | .cleanup_mapping = skx_iio_cleanup_mapping, |
4031 | }; |
4032 | |
4033 | enum perf_uncore_iio_freerunning_type_id { |
4034 | SKX_IIO_MSR_IOCLK = 0, |
4035 | SKX_IIO_MSR_BW = 1, |
4036 | SKX_IIO_MSR_UTIL = 2, |
4037 | |
4038 | SKX_IIO_FREERUNNING_TYPE_MAX, |
4039 | }; |
4040 | |
4042 | static struct freerunning_counters skx_iio_freerunning[] = { |
	[SKX_IIO_MSR_IOCLK] = { .counter_base = 0xa45, .counter_offset = 0x1, .box_offset = 0x20, .num_counters = 1, .bits = 36 },
	[SKX_IIO_MSR_BW] = { .counter_base = 0xb00, .counter_offset = 0x1, .box_offset = 0x10, .num_counters = 8, .bits = 36 },
	[SKX_IIO_MSR_UTIL] = { .counter_base = 0xb08, .counter_offset = 0x1, .box_offset = 0x10, .num_counters = 8, .bits = 36 },
4046 | }; |
4047 | |
4048 | static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = { |
4049 | /* Free-Running IO CLOCKS Counter */ |
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
4085 | { /* end: all zeroes */ }, |
4086 | }; |
4087 | |
4088 | static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = { |
4089 | .read_counter = uncore_msr_read_counter, |
4090 | .hw_config = uncore_freerunning_hw_config, |
4091 | }; |
4092 | |
4093 | static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = { |
4094 | &format_attr_event.attr, |
4095 | &format_attr_umask.attr, |
4096 | NULL, |
4097 | }; |
4098 | |
4099 | static const struct attribute_group skx_uncore_iio_freerunning_format_group = { |
4100 | .name = "format" , |
4101 | .attrs = skx_uncore_iio_freerunning_formats_attr, |
4102 | }; |
4103 | |
4104 | static struct intel_uncore_type skx_uncore_iio_free_running = { |
4105 | .name = "iio_free_running" , |
4106 | .num_counters = 17, |
4107 | .num_boxes = 6, |
4108 | .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX, |
4109 | .freerunning = skx_iio_freerunning, |
4110 | .ops = &skx_uncore_iio_freerunning_ops, |
4111 | .event_descs = skx_uncore_iio_freerunning_events, |
4112 | .format_group = &skx_uncore_iio_freerunning_format_group, |
4113 | }; |
4114 | |
4115 | static struct attribute *skx_uncore_formats_attr[] = { |
4116 | &format_attr_event.attr, |
4117 | &format_attr_umask.attr, |
4118 | &format_attr_edge.attr, |
4119 | &format_attr_inv.attr, |
4120 | &format_attr_thresh8.attr, |
4121 | NULL, |
4122 | }; |
4123 | |
4124 | static const struct attribute_group skx_uncore_format_group = { |
4125 | .name = "format" , |
4126 | .attrs = skx_uncore_formats_attr, |
4127 | }; |
4128 | |
4129 | static struct intel_uncore_type skx_uncore_irp = { |
4130 | .name = "irp" , |
4131 | .num_counters = 2, |
4132 | .num_boxes = 6, |
4133 | .perf_ctr_bits = 48, |
4134 | .event_ctl = SKX_IRP0_MSR_PMON_CTL0, |
4135 | .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, |
4136 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4137 | .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL, |
4138 | .msr_offset = SKX_IRP_MSR_OFFSET, |
4139 | .ops = &skx_uncore_iio_ops, |
4140 | .format_group = &skx_uncore_format_group, |
4141 | }; |
4142 | |
4143 | static struct attribute *skx_uncore_pcu_formats_attr[] = { |
4144 | &format_attr_event.attr, |
4145 | &format_attr_umask.attr, |
4146 | &format_attr_edge.attr, |
4147 | &format_attr_inv.attr, |
4148 | &format_attr_thresh8.attr, |
4149 | &format_attr_occ_invert.attr, |
4150 | &format_attr_occ_edge_det.attr, |
4151 | &format_attr_filter_band0.attr, |
4152 | &format_attr_filter_band1.attr, |
4153 | &format_attr_filter_band2.attr, |
4154 | &format_attr_filter_band3.attr, |
4155 | NULL, |
4156 | }; |
4157 | |
4158 | static struct attribute_group skx_uncore_pcu_format_group = { |
4159 | .name = "format" , |
4160 | .attrs = skx_uncore_pcu_formats_attr, |
4161 | }; |
4162 | |
4163 | static struct intel_uncore_ops skx_uncore_pcu_ops = { |
4164 | IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
4165 | .hw_config = hswep_pcu_hw_config, |
4166 | .get_constraint = snbep_pcu_get_constraint, |
4167 | .put_constraint = snbep_pcu_put_constraint, |
4168 | }; |
4169 | |
4170 | static struct intel_uncore_type skx_uncore_pcu = { |
4171 | .name = "pcu" , |
4172 | .num_counters = 4, |
4173 | .num_boxes = 1, |
4174 | .perf_ctr_bits = 48, |
4175 | .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, |
4176 | .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, |
4177 | .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, |
4178 | .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, |
4179 | .num_shared_regs = 1, |
4180 | .ops = &skx_uncore_pcu_ops, |
4181 | .format_group = &skx_uncore_pcu_format_group, |
4182 | }; |
4183 | |
4184 | static struct intel_uncore_type *skx_msr_uncores[] = { |
4185 | &skx_uncore_ubox, |
4186 | &skx_uncore_chabox, |
4187 | &skx_uncore_iio, |
4188 | &skx_uncore_iio_free_running, |
4189 | &skx_uncore_irp, |
4190 | &skx_uncore_pcu, |
4191 | NULL, |
4192 | }; |
4193 | |
4194 | /* |
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI ID 0x2083).
4197 | */ |
4198 | #define SKX_CAPID6 0x9c |
4199 | #define SKX_CHA_BIT_MASK GENMASK(27, 0) |
4200 | |
4201 | static int skx_count_chabox(void) |
4202 | { |
4203 | struct pci_dev *dev = NULL; |
4204 | u32 val = 0; |
4205 | |
	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4207 | if (!dev) |
4208 | goto out; |
4209 | |
	pci_read_config_dword(dev, SKX_CAPID6, &val);
4211 | val &= SKX_CHA_BIT_MASK; |
4212 | out: |
4213 | pci_dev_put(dev); |
4214 | return hweight32(val); |
4215 | } |
4216 | |
4217 | void skx_uncore_cpu_init(void) |
4218 | { |
4219 | skx_uncore_chabox.num_boxes = skx_count_chabox(); |
4220 | uncore_msr_uncores = skx_msr_uncores; |
4221 | } |
4222 | |
4223 | static struct intel_uncore_type skx_uncore_imc = { |
4224 | .name = "imc" , |
4225 | .num_counters = 4, |
4226 | .num_boxes = 6, |
4227 | .perf_ctr_bits = 48, |
4228 | .fixed_ctr_bits = 48, |
4229 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, |
4230 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, |
4231 | .event_descs = hswep_uncore_imc_events, |
4232 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
4233 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
4234 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4235 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
4236 | .ops = &ivbep_uncore_pci_ops, |
4237 | .format_group = &skx_uncore_format_group, |
4238 | }; |
4239 | |
4240 | static struct attribute *skx_upi_uncore_formats_attr[] = { |
4241 | &format_attr_event.attr, |
4242 | &format_attr_umask_ext.attr, |
4243 | &format_attr_edge.attr, |
4244 | &format_attr_inv.attr, |
4245 | &format_attr_thresh8.attr, |
4246 | NULL, |
4247 | }; |
4248 | |
4249 | static const struct attribute_group skx_upi_uncore_format_group = { |
4250 | .name = "format" , |
4251 | .attrs = skx_upi_uncore_formats_attr, |
4252 | }; |
4253 | |
4254 | static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box) |
4255 | { |
4256 | struct pci_dev *pdev = box->pci_dev; |
4257 | |
4258 | __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); |
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4260 | } |
4261 | |
4262 | static struct intel_uncore_ops skx_upi_uncore_pci_ops = { |
4263 | .init_box = skx_upi_uncore_pci_init_box, |
4264 | .disable_box = snbep_uncore_pci_disable_box, |
4265 | .enable_box = snbep_uncore_pci_enable_box, |
4266 | .disable_event = snbep_uncore_pci_disable_event, |
4267 | .enable_event = snbep_uncore_pci_enable_event, |
4268 | .read_counter = snbep_uncore_pci_read_counter, |
4269 | }; |
4270 | |
4271 | static umode_t |
4272 | skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) |
4273 | { |
4274 | struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj)); |
4275 | |
4276 | return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0; |
4277 | } |
4278 | |
4279 | static ssize_t skx_upi_mapping_show(struct device *dev, |
4280 | struct device_attribute *attr, char *buf) |
4281 | { |
4282 | struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); |
4283 | struct dev_ext_attribute *ea = to_dev_ext_attribute(attr); |
4284 | long die = (long)ea->var; |
4285 | struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi; |
4286 | |
	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4288 | } |
4289 | |
4290 | #define SKX_UPI_REG_DID 0x2058 |
4291 | #define SKX_UPI_REGS_ADDR_DEVICE_LINK0 0x0e |
4292 | #define SKX_UPI_REGS_ADDR_FUNCTION 0x00 |
4293 | |
4294 | /* |
4295 | * UPI Link Parameter 0 |
4296 | * | Bit | Default | Description |
4297 | * | 19:16 | 0h | base_nodeid - The NodeID of the sending socket. |
4298 | * | 12:8 | 00h | sending_port - The processor die port number of the sending port. |
4299 | */ |
4300 | #define SKX_KTILP0_OFFSET 0x94 |
4301 | |
4302 | /* |
4303 | * UPI Pcode Status. This register is used by PCode to store the link training status. |
4304 | * | Bit | Default | Description |
4305 | * | 4 | 0h | ll_status_valid — Bit indicates the valid training status |
4306 | * logged from PCode to the BIOS. |
4307 | */ |
4308 | #define SKX_KTIPCSTS_OFFSET 0x120 |
4309 | |
4310 | static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp, |
4311 | int pmu_idx) |
4312 | { |
4313 | int ret; |
4314 | u32 upi_conf; |
4315 | struct uncore_upi_topology *upi = tp->upi; |
4316 | |
4317 | tp->pmu_idx = pmu_idx; |
	ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4319 | if (ret) { |
		ret = pcibios_err_to_errno(ret);
4321 | goto err; |
4322 | } |
4323 | upi->enabled = (upi_conf >> 4) & 1; |
4324 | if (upi->enabled) { |
		ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
					    &upi_conf);
4327 | if (ret) { |
			ret = pcibios_err_to_errno(ret);
4329 | goto err; |
4330 | } |
4331 | upi->die_to = (upi_conf >> 16) & 0xf; |
4332 | upi->pmu_idx_to = (upi_conf >> 8) & 0x1f; |
4333 | } |
4334 | err: |
4335 | return ret; |
4336 | } |
4337 | |
4338 | static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment, |
4339 | int die, u64 cpu_bus_msr) |
4340 | { |
	int idx, ret = 0;
4342 | struct intel_uncore_topology *upi; |
4343 | unsigned int devfn; |
4344 | struct pci_dev *dev = NULL; |
4345 | u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE); |
4346 | |
4347 | for (idx = 0; idx < type->num_boxes; idx++) { |
4348 | upi = &type->topology[die][idx]; |
4349 | devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx, |
4350 | SKX_UPI_REGS_ADDR_FUNCTION); |
		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
4352 | if (dev) { |
			ret = upi_fill_topology(dev, upi, idx);
4354 | if (ret) |
4355 | break; |
4356 | } |
4357 | } |
4358 | |
4359 | pci_dev_put(dev); |
4360 | return ret; |
4361 | } |
4362 | |
4363 | static int skx_upi_get_topology(struct intel_uncore_type *type) |
4364 | { |
4365 | /* CPX case is not supported */ |
4366 | if (boot_cpu_data.x86_stepping == 11) |
4367 | return -EPERM; |
4368 | |
	return skx_pmu_get_topology(type, skx_upi_topology_cb);
4370 | } |
4371 | |
4372 | static struct attribute_group skx_upi_mapping_group = { |
4373 | .is_visible = skx_upi_mapping_visible, |
4374 | }; |
4375 | |
4376 | static const struct attribute_group *skx_upi_attr_update[] = { |
4377 | &skx_upi_mapping_group, |
4378 | NULL |
4379 | }; |
4380 | |
4381 | static void |
4382 | pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) |
4383 | { |
	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
4385 | } |
4386 | |
4387 | static void skx_upi_set_mapping(struct intel_uncore_type *type) |
4388 | { |
	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
4390 | } |
4391 | |
4392 | static void skx_upi_cleanup_mapping(struct intel_uncore_type *type) |
4393 | { |
	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
4395 | } |
4396 | |
4397 | static struct intel_uncore_type skx_uncore_upi = { |
4398 | .name = "upi" , |
4399 | .num_counters = 4, |
4400 | .num_boxes = 3, |
4401 | .perf_ctr_bits = 48, |
4402 | .perf_ctr = SKX_UPI_PCI_PMON_CTR0, |
4403 | .event_ctl = SKX_UPI_PCI_PMON_CTL0, |
4404 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4405 | .event_mask_ext = SKX_UPI_CTL_UMASK_EXT, |
4406 | .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, |
4407 | .ops = &skx_upi_uncore_pci_ops, |
4408 | .format_group = &skx_upi_uncore_format_group, |
4409 | .attr_update = skx_upi_attr_update, |
4410 | .get_topology = skx_upi_get_topology, |
4411 | .set_mapping = skx_upi_set_mapping, |
4412 | .cleanup_mapping = skx_upi_cleanup_mapping, |
4413 | }; |
4414 | |
4415 | static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box) |
4416 | { |
4417 | struct pci_dev *pdev = box->pci_dev; |
4418 | |
4419 | __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); |
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4421 | } |
4422 | |
4423 | static struct intel_uncore_ops skx_m2m_uncore_pci_ops = { |
4424 | .init_box = skx_m2m_uncore_pci_init_box, |
4425 | .disable_box = snbep_uncore_pci_disable_box, |
4426 | .enable_box = snbep_uncore_pci_enable_box, |
4427 | .disable_event = snbep_uncore_pci_disable_event, |
4428 | .enable_event = snbep_uncore_pci_enable_event, |
4429 | .read_counter = snbep_uncore_pci_read_counter, |
4430 | }; |
4431 | |
4432 | static struct intel_uncore_type skx_uncore_m2m = { |
4433 | .name = "m2m" , |
4434 | .num_counters = 4, |
4435 | .num_boxes = 2, |
4436 | .perf_ctr_bits = 48, |
4437 | .perf_ctr = SKX_M2M_PCI_PMON_CTR0, |
4438 | .event_ctl = SKX_M2M_PCI_PMON_CTL0, |
4439 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4440 | .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL, |
4441 | .ops = &skx_m2m_uncore_pci_ops, |
4442 | .format_group = &skx_uncore_format_group, |
4443 | }; |
4444 | |
4445 | static struct event_constraint skx_uncore_m2pcie_constraints[] = { |
4446 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), |
4447 | EVENT_CONSTRAINT_END |
4448 | }; |
4449 | |
4450 | static struct intel_uncore_type skx_uncore_m2pcie = { |
4451 | .name = "m2pcie" , |
4452 | .num_counters = 4, |
4453 | .num_boxes = 4, |
4454 | .perf_ctr_bits = 48, |
4455 | .constraints = skx_uncore_m2pcie_constraints, |
4456 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
4457 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
4458 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4459 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
4460 | .ops = &ivbep_uncore_pci_ops, |
4461 | .format_group = &skx_uncore_format_group, |
4462 | }; |
4463 | |
4464 | static struct event_constraint skx_uncore_m3upi_constraints[] = { |
4465 | UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), |
4466 | UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), |
4467 | UNCORE_EVENT_CONSTRAINT(0x40, 0x7), |
4468 | UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), |
4469 | UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), |
4470 | UNCORE_EVENT_CONSTRAINT(0x50, 0x7), |
4471 | UNCORE_EVENT_CONSTRAINT(0x51, 0x7), |
4472 | UNCORE_EVENT_CONSTRAINT(0x52, 0x7), |
4473 | EVENT_CONSTRAINT_END |
4474 | }; |
4475 | |
4476 | static struct intel_uncore_type skx_uncore_m3upi = { |
4477 | .name = "m3upi" , |
4478 | .num_counters = 3, |
4479 | .num_boxes = 3, |
4480 | .perf_ctr_bits = 48, |
4481 | .constraints = skx_uncore_m3upi_constraints, |
4482 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
4483 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
4484 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4485 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
4486 | .ops = &ivbep_uncore_pci_ops, |
4487 | .format_group = &skx_uncore_format_group, |
4488 | }; |
4489 | |
4490 | enum { |
4491 | SKX_PCI_UNCORE_IMC, |
4492 | SKX_PCI_UNCORE_M2M, |
4493 | SKX_PCI_UNCORE_UPI, |
4494 | SKX_PCI_UNCORE_M2PCIE, |
4495 | SKX_PCI_UNCORE_M3UPI, |
4496 | }; |
4497 | |
4498 | static struct intel_uncore_type *skx_pci_uncores[] = { |
4499 | [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc, |
4500 | [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m, |
4501 | [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi, |
4502 | [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie, |
4503 | [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi, |
4504 | NULL, |
4505 | }; |
4506 | |
4507 | static const struct pci_device_id skx_uncore_pci_ids[] = { |
4508 | { /* MC0 Channel 0 */ |
4509 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), |
4510 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0), |
4511 | }, |
4512 | { /* MC0 Channel 1 */ |
4513 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), |
4514 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1), |
4515 | }, |
4516 | { /* MC0 Channel 2 */ |
4517 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), |
4518 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2), |
4519 | }, |
4520 | { /* MC1 Channel 0 */ |
4521 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), |
4522 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3), |
4523 | }, |
4524 | { /* MC1 Channel 1 */ |
4525 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), |
4526 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4), |
4527 | }, |
4528 | { /* MC1 Channel 2 */ |
4529 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), |
4530 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5), |
4531 | }, |
4532 | { /* M2M0 */ |
4533 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), |
4534 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0), |
4535 | }, |
4536 | { /* M2M1 */ |
4537 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), |
4538 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1), |
4539 | }, |
4540 | { /* UPI0 Link 0 */ |
4541 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), |
4542 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0), |
4543 | }, |
4544 | { /* UPI0 Link 1 */ |
4545 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), |
4546 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1), |
4547 | }, |
4548 | { /* UPI1 Link 2 */ |
4549 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), |
4550 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2), |
4551 | }, |
4552 | { /* M2PCIe 0 */ |
4553 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), |
4554 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0), |
4555 | }, |
4556 | { /* M2PCIe 1 */ |
4557 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), |
4558 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1), |
4559 | }, |
4560 | { /* M2PCIe 2 */ |
4561 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), |
4562 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2), |
4563 | }, |
4564 | { /* M2PCIe 3 */ |
4565 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), |
4566 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3), |
4567 | }, |
4568 | { /* M3UPI0 Link 0 */ |
4569 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), |
4570 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0), |
4571 | }, |
4572 | { /* M3UPI0 Link 1 */ |
4573 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E), |
4574 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1), |
4575 | }, |
4576 | { /* M3UPI1 Link 2 */ |
4577 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), |
4578 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2), |
4579 | }, |
4580 | { /* end: all zeroes */ } |
4581 | }; |
4582 | |
4583 | |
4584 | static struct pci_driver skx_uncore_pci_driver = { |
4585 | .name = "skx_uncore" , |
4586 | .id_table = skx_uncore_pci_ids, |
4587 | }; |
4588 | |
4589 | int skx_uncore_pci_init(void) |
4590 | { |
4591 | /* need to double check pci address */ |
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4593 | |
4594 | if (ret) |
4595 | return ret; |
4596 | |
4597 | uncore_pci_uncores = skx_pci_uncores; |
4598 | uncore_pci_driver = &skx_uncore_pci_driver; |
4599 | return 0; |
4600 | } |
4601 | |
4602 | /* end of SKX uncore support */ |
4603 | |
4604 | /* SNR uncore support */ |
4605 | |
4606 | static struct intel_uncore_type snr_uncore_ubox = { |
4607 | .name = "ubox" , |
4608 | .num_counters = 2, |
4609 | .num_boxes = 1, |
4610 | .perf_ctr_bits = 48, |
4611 | .fixed_ctr_bits = 48, |
4612 | .perf_ctr = SNR_U_MSR_PMON_CTR0, |
4613 | .event_ctl = SNR_U_MSR_PMON_CTL0, |
4614 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4615 | .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR, |
4616 | .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL, |
4617 | .ops = &ivbep_uncore_msr_ops, |
4618 | .format_group = &ivbep_uncore_format_group, |
4619 | }; |
4620 | |
4621 | static struct attribute *snr_uncore_cha_formats_attr[] = { |
4622 | &format_attr_event.attr, |
4623 | &format_attr_umask_ext2.attr, |
4624 | &format_attr_edge.attr, |
4625 | &format_attr_tid_en.attr, |
4626 | &format_attr_inv.attr, |
4627 | &format_attr_thresh8.attr, |
4628 | &format_attr_filter_tid5.attr, |
4629 | NULL, |
4630 | }; |
4631 | static const struct attribute_group snr_uncore_chabox_format_group = { |
4632 | .name = "format" , |
4633 | .attrs = snr_uncore_cha_formats_attr, |
4634 | }; |
4635 | |
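/*
 * Each SNR CHA has its own filter register; program the TID filter
 * from the event's config1 attribute.
 */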
4636 | static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
4637 | { |
4638 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
4639 | |
4640 | reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 + |
4641 | box->pmu->type->msr_offset * box->pmu->pmu_idx; |
4642 | reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; |
4643 | reg1->idx = 0; |
4644 | |
4645 | return 0; |
4646 | } |
4647 | |
4648 | static void snr_cha_enable_event(struct intel_uncore_box *box, |
4649 | struct perf_event *event) |
4650 | { |
4651 | struct hw_perf_event *hwc = &event->hw; |
4652 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
4653 | |
4654 | if (reg1->idx != EXTRA_REG_NONE) |
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4658 | } |
4659 | |
4660 | static struct intel_uncore_ops snr_uncore_chabox_ops = { |
4661 | .init_box = ivbep_uncore_msr_init_box, |
4662 | .disable_box = snbep_uncore_msr_disable_box, |
4663 | .enable_box = snbep_uncore_msr_enable_box, |
4664 | .disable_event = snbep_uncore_msr_disable_event, |
4665 | .enable_event = snr_cha_enable_event, |
4666 | .read_counter = uncore_msr_read_counter, |
4667 | .hw_config = snr_cha_hw_config, |
4668 | }; |
4669 | |
4670 | static struct intel_uncore_type snr_uncore_chabox = { |
4671 | .name = "cha" , |
4672 | .num_counters = 4, |
4673 | .num_boxes = 6, |
4674 | .perf_ctr_bits = 48, |
4675 | .event_ctl = SNR_CHA_MSR_PMON_CTL0, |
4676 | .perf_ctr = SNR_CHA_MSR_PMON_CTR0, |
4677 | .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL, |
4678 | .msr_offset = HSWEP_CBO_MSR_OFFSET, |
4679 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, |
4680 | .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, |
4681 | .ops = &snr_uncore_chabox_ops, |
4682 | .format_group = &snr_uncore_chabox_format_group, |
4683 | }; |
4684 | |
4685 | static struct attribute *snr_uncore_iio_formats_attr[] = { |
4686 | &format_attr_event.attr, |
4687 | &format_attr_umask.attr, |
4688 | &format_attr_edge.attr, |
4689 | &format_attr_inv.attr, |
4690 | &format_attr_thresh9.attr, |
4691 | &format_attr_ch_mask2.attr, |
4692 | &format_attr_fc_mask2.attr, |
4693 | NULL, |
4694 | }; |
4695 | |
4696 | static const struct attribute_group snr_uncore_iio_format_group = { |
4697 | .name = "format" , |
4698 | .attrs = snr_uncore_iio_formats_attr, |
4699 | }; |
4700 | |
4701 | static umode_t |
4702 | snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) |
4703 | { |
4704 | /* Root bus 0x00 is valid only for pmu_idx = 1. */ |
	return pmu_iio_mapping_visible(kobj, attr, die, 1);
4706 | } |
4707 | |
4708 | static struct attribute_group snr_iio_mapping_group = { |
4709 | .is_visible = snr_iio_mapping_visible, |
4710 | }; |
4711 | |
4712 | static const struct attribute_group *snr_iio_attr_update[] = { |
4713 | &snr_iio_mapping_group, |
4714 | NULL, |
4715 | }; |
4716 | |
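/*
 * Walk all Mesh2IIO devices and read SAD_CONTROL_CFG to discover, for
 * each die, which PCI bus each IIO stack sits on.  The stack ID read
 * from the hardware is translated to PMON numbering via the supplied
 * mapping table.
 */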
4717 | static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping) |
4718 | { |
4719 | u32 sad_cfg; |
4720 | int die, stack_id, ret = -EPERM; |
4721 | struct pci_dev *dev = NULL; |
4722 | |
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4725 | if (ret) { |
			ret = pcibios_err_to_errno(ret);
4727 | break; |
4728 | } |
4729 | |
		die = uncore_pcibus_to_dieid(dev->bus);
4731 | stack_id = SAD_CONTROL_STACK_ID(sad_cfg); |
4732 | if (die < 0 || stack_id >= type->num_boxes) { |
4733 | ret = -EPERM; |
4734 | break; |
4735 | } |
4736 | |
4737 | /* Convert stack id from SAD_CONTROL to PMON notation. */ |
4738 | stack_id = sad_pmon_mapping[stack_id]; |
4739 | |
		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4741 | type->topology[die][stack_id].pmu_idx = stack_id; |
4742 | type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number; |
4743 | } |
4744 | |
4745 | pci_dev_put(dev); |
4746 | |
4747 | return ret; |
4748 | } |
4749 | |
4750 | /* |
4751 | * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON |
4752 | */ |
4753 | enum { |
4754 | SNR_QAT_PMON_ID, |
4755 | SNR_CBDMA_DMI_PMON_ID, |
4756 | SNR_NIS_PMON_ID, |
4757 | SNR_DLB_PMON_ID, |
4758 | SNR_PCIE_GEN3_PMON_ID |
4759 | }; |
4760 | |
4761 | static u8 snr_sad_pmon_mapping[] = { |
4762 | SNR_CBDMA_DMI_PMON_ID, |
4763 | SNR_PCIE_GEN3_PMON_ID, |
4764 | SNR_DLB_PMON_ID, |
4765 | SNR_NIS_PMON_ID, |
4766 | SNR_QAT_PMON_ID |
4767 | }; |
4768 | |
4769 | static int snr_iio_get_topology(struct intel_uncore_type *type) |
4770 | { |
	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4772 | } |
4773 | |
4774 | static void snr_iio_set_mapping(struct intel_uncore_type *type) |
4775 | { |
	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4777 | } |
4778 | |
4779 | static void snr_iio_cleanup_mapping(struct intel_uncore_type *type) |
4780 | { |
	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
4782 | } |
4783 | |
4784 | static struct event_constraint snr_uncore_iio_constraints[] = { |
4785 | UNCORE_EVENT_CONSTRAINT(0x83, 0x3), |
4786 | UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), |
4787 | UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), |
4788 | EVENT_CONSTRAINT_END |
4789 | }; |
4790 | |
4791 | static struct intel_uncore_type snr_uncore_iio = { |
4792 | .name = "iio" , |
4793 | .num_counters = 4, |
4794 | .num_boxes = 5, |
4795 | .perf_ctr_bits = 48, |
4796 | .event_ctl = SNR_IIO_MSR_PMON_CTL0, |
4797 | .perf_ctr = SNR_IIO_MSR_PMON_CTR0, |
4798 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4799 | .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, |
4800 | .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL, |
4801 | .msr_offset = SNR_IIO_MSR_OFFSET, |
4802 | .constraints = snr_uncore_iio_constraints, |
4803 | .ops = &ivbep_uncore_msr_ops, |
4804 | .format_group = &snr_uncore_iio_format_group, |
4805 | .attr_update = snr_iio_attr_update, |
4806 | .get_topology = snr_iio_get_topology, |
4807 | .set_mapping = snr_iio_set_mapping, |
4808 | .cleanup_mapping = snr_iio_cleanup_mapping, |
4809 | }; |
4810 | |
4811 | static struct intel_uncore_type snr_uncore_irp = { |
4812 | .name = "irp" , |
4813 | .num_counters = 2, |
4814 | .num_boxes = 5, |
4815 | .perf_ctr_bits = 48, |
4816 | .event_ctl = SNR_IRP0_MSR_PMON_CTL0, |
4817 | .perf_ctr = SNR_IRP0_MSR_PMON_CTR0, |
4818 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4819 | .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL, |
4820 | .msr_offset = SNR_IRP_MSR_OFFSET, |
4821 | .ops = &ivbep_uncore_msr_ops, |
4822 | .format_group = &ivbep_uncore_format_group, |
4823 | }; |
4824 | |
4825 | static struct intel_uncore_type snr_uncore_m2pcie = { |
4826 | .name = "m2pcie" , |
4827 | .num_counters = 4, |
4828 | .num_boxes = 5, |
4829 | .perf_ctr_bits = 48, |
4830 | .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0, |
4831 | .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0, |
4832 | .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL, |
4833 | .msr_offset = SNR_M2PCIE_MSR_OFFSET, |
4834 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4835 | .ops = &ivbep_uncore_msr_ops, |
4836 | .format_group = &ivbep_uncore_format_group, |
4837 | }; |
4838 | |
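/*
 * Events 0xb-0xe use the shared PCU box filter register; stash the
 * filter value taken from the event's config1 attribute.
 */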
4839 | static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
4840 | { |
4841 | struct hw_perf_event *hwc = &event->hw; |
4842 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
4843 | int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; |
4844 | |
4845 | if (ev_sel >= 0xb && ev_sel <= 0xe) { |
4846 | reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER; |
4847 | reg1->idx = ev_sel - 0xb; |
4848 | reg1->config = event->attr.config1 & (0xff << reg1->idx); |
4849 | } |
4850 | return 0; |
4851 | } |
4852 | |
4853 | static struct intel_uncore_ops snr_uncore_pcu_ops = { |
4854 | IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
4855 | .hw_config = snr_pcu_hw_config, |
4856 | .get_constraint = snbep_pcu_get_constraint, |
4857 | .put_constraint = snbep_pcu_put_constraint, |
4858 | }; |
4859 | |
4860 | static struct intel_uncore_type snr_uncore_pcu = { |
4861 | .name = "pcu" , |
4862 | .num_counters = 4, |
4863 | .num_boxes = 1, |
4864 | .perf_ctr_bits = 48, |
4865 | .perf_ctr = SNR_PCU_MSR_PMON_CTR0, |
4866 | .event_ctl = SNR_PCU_MSR_PMON_CTL0, |
4867 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4868 | .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL, |
4869 | .num_shared_regs = 1, |
4870 | .ops = &snr_uncore_pcu_ops, |
4871 | .format_group = &skx_uncore_pcu_format_group, |
4872 | }; |
4873 | |
4874 | enum perf_uncore_snr_iio_freerunning_type_id { |
4875 | SNR_IIO_MSR_IOCLK, |
4876 | SNR_IIO_MSR_BW_IN, |
4877 | |
4878 | SNR_IIO_FREERUNNING_TYPE_MAX, |
4879 | }; |
4880 | |
4881 | static struct freerunning_counters snr_iio_freerunning[] = { |
4882 | [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 }, |
	[SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4884 | }; |
4885 | |
4886 | static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = { |
4887 | /* Free-Running IIO CLOCKS Counter */ |
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4914 | { /* end: all zeroes */ }, |
4915 | }; |
4916 | |
4917 | static struct intel_uncore_type snr_uncore_iio_free_running = { |
4918 | .name = "iio_free_running" , |
4919 | .num_counters = 9, |
4920 | .num_boxes = 5, |
4921 | .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX, |
4922 | .freerunning = snr_iio_freerunning, |
4923 | .ops = &skx_uncore_iio_freerunning_ops, |
4924 | .event_descs = snr_uncore_iio_freerunning_events, |
4925 | .format_group = &skx_uncore_iio_freerunning_format_group, |
4926 | }; |
4927 | |
4928 | static struct intel_uncore_type *snr_msr_uncores[] = { |
4929 | &snr_uncore_ubox, |
4930 | &snr_uncore_chabox, |
4931 | &snr_uncore_iio, |
4932 | &snr_uncore_irp, |
4933 | &snr_uncore_m2pcie, |
4934 | &snr_uncore_pcu, |
4935 | &snr_uncore_iio_free_running, |
4936 | NULL, |
4937 | }; |
4938 | |
4939 | void snr_uncore_cpu_init(void) |
4940 | { |
4941 | uncore_msr_uncores = snr_msr_uncores; |
4942 | } |
4943 | |
4944 | static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box) |
4945 | { |
4946 | struct pci_dev *pdev = box->pci_dev; |
4947 | int box_ctl = uncore_pci_box_ctl(box); |
4948 | |
4949 | __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); |
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4951 | } |
4952 | |
4953 | static struct intel_uncore_ops snr_m2m_uncore_pci_ops = { |
4954 | .init_box = snr_m2m_uncore_pci_init_box, |
4955 | .disable_box = snbep_uncore_pci_disable_box, |
4956 | .enable_box = snbep_uncore_pci_enable_box, |
4957 | .disable_event = snbep_uncore_pci_disable_event, |
4958 | .enable_event = snbep_uncore_pci_enable_event, |
4959 | .read_counter = snbep_uncore_pci_read_counter, |
4960 | }; |
4961 | |
4962 | static struct attribute *snr_m2m_uncore_formats_attr[] = { |
4963 | &format_attr_event.attr, |
4964 | &format_attr_umask_ext3.attr, |
4965 | &format_attr_edge.attr, |
4966 | &format_attr_inv.attr, |
4967 | &format_attr_thresh8.attr, |
4968 | NULL, |
4969 | }; |
4970 | |
4971 | static const struct attribute_group snr_m2m_uncore_format_group = { |
4972 | .name = "format" , |
4973 | .attrs = snr_m2m_uncore_formats_attr, |
4974 | }; |
4975 | |
4976 | static struct intel_uncore_type snr_uncore_m2m = { |
4977 | .name = "m2m" , |
4978 | .num_counters = 4, |
4979 | .num_boxes = 1, |
4980 | .perf_ctr_bits = 48, |
4981 | .perf_ctr = SNR_M2M_PCI_PMON_CTR0, |
4982 | .event_ctl = SNR_M2M_PCI_PMON_CTL0, |
4983 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
4984 | .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT, |
4985 | .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, |
4986 | .ops = &snr_m2m_uncore_pci_ops, |
4987 | .format_group = &snr_m2m_uncore_format_group, |
4988 | }; |
4989 | |
4990 | static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
4991 | { |
4992 | struct pci_dev *pdev = box->pci_dev; |
4993 | struct hw_perf_event *hwc = &event->hw; |
4994 | |
	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4997 | } |
4998 | |
4999 | static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = { |
5000 | .init_box = snr_m2m_uncore_pci_init_box, |
5001 | .disable_box = snbep_uncore_pci_disable_box, |
5002 | .enable_box = snbep_uncore_pci_enable_box, |
5003 | .disable_event = snbep_uncore_pci_disable_event, |
5004 | .enable_event = snr_uncore_pci_enable_event, |
5005 | .read_counter = snbep_uncore_pci_read_counter, |
5006 | }; |
5007 | |
5008 | static struct intel_uncore_type snr_uncore_pcie3 = { |
5009 | .name = "pcie3" , |
5010 | .num_counters = 4, |
5011 | .num_boxes = 1, |
5012 | .perf_ctr_bits = 48, |
5013 | .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, |
5014 | .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, |
5015 | .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, |
5016 | .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, |
5017 | .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, |
5018 | .ops = &snr_pcie3_uncore_pci_ops, |
5019 | .format_group = &skx_uncore_iio_format_group, |
5020 | }; |
5021 | |
5022 | enum { |
5023 | SNR_PCI_UNCORE_M2M, |
5024 | SNR_PCI_UNCORE_PCIE3, |
5025 | }; |
5026 | |
5027 | static struct intel_uncore_type *snr_pci_uncores[] = { |
5028 | [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, |
5029 | [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, |
5030 | NULL, |
5031 | }; |
5032 | |
5033 | static const struct pci_device_id snr_uncore_pci_ids[] = { |
5034 | { /* M2M */ |
5035 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), |
5036 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0), |
5037 | }, |
5038 | { /* end: all zeroes */ } |
5039 | }; |
5040 | |
5041 | static struct pci_driver snr_uncore_pci_driver = { |
5042 | .name = "snr_uncore" , |
5043 | .id_table = snr_uncore_pci_ids, |
5044 | }; |
5045 | |
5046 | static const struct pci_device_id snr_uncore_pci_sub_ids[] = { |
5047 | { /* PCIe3 RP */ |
5048 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), |
5049 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), |
5050 | }, |
5051 | { /* end: all zeroes */ } |
5052 | }; |
5053 | |
5054 | static struct pci_driver snr_uncore_pci_sub_driver = { |
5055 | .name = "snr_uncore_sub" , |
5056 | .id_table = snr_uncore_pci_sub_ids, |
5057 | }; |
5058 | |
5059 | int snr_uncore_pci_init(void) |
5060 | { |
5061 | /* SNR UBOX DID */ |
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);
5064 | |
5065 | if (ret) |
5066 | return ret; |
5067 | |
5068 | uncore_pci_uncores = snr_pci_uncores; |
5069 | uncore_pci_driver = &snr_uncore_pci_driver; |
5070 | uncore_pci_sub_driver = &snr_uncore_pci_sub_driver; |
5071 | return 0; |
5072 | } |
5073 | |
5074 | #define SNR_MC_DEVICE_ID 0x3451 |
5075 | |
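/* Find the memory controller PCI device that belongs to the given die. */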
5076 | static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id) |
5077 | { |
5078 | struct pci_dev *mc_dev = NULL; |
5079 | int pkg; |
5080 | |
5081 | while (1) { |
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
5083 | if (!mc_dev) |
5084 | break; |
		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
5086 | if (pkg == id) |
5087 | break; |
5088 | } |
5089 | return mc_dev; |
5090 | } |
5091 | |
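/*
 * The IMC PMON registers live in MMIO space.  Reconstruct the base
 * address from the memory controller's BAR configuration registers,
 * then map the box at box_ctl within that region.
 */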
5092 | static int snr_uncore_mmio_map(struct intel_uncore_box *box, |
5093 | unsigned int box_ctl, int mem_offset, |
5094 | unsigned int device) |
5095 | { |
	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
5097 | struct intel_uncore_type *type = box->pmu->type; |
5098 | resource_size_t addr; |
5099 | u32 pci_dword; |
5100 | |
5101 | if (!pdev) |
5102 | return -ENODEV; |
5103 | |
	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
5108 | addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12; |
5109 | |
5110 | addr += box_ctl; |
5111 | |
	pci_dev_put(pdev);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
5117 | return -EINVAL; |
5118 | } |
5119 | |
5120 | return 0; |
5121 | } |
5122 | |
5123 | static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box, |
5124 | unsigned int box_ctl, int mem_offset, |
5125 | unsigned int device) |
5126 | { |
5127 | if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device)) |
		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
5129 | } |
5130 | |
5131 | static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) |
5132 | { |
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
5134 | SNR_IMC_MMIO_MEM0_OFFSET, |
5135 | SNR_MC_DEVICE_ID); |
5136 | } |
5137 | |
5138 | static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box) |
5139 | { |
5140 | u32 config; |
5141 | |
5142 | if (!box->io_addr) |
5143 | return; |
5144 | |
	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
5148 | } |
5149 | |
5150 | static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box) |
5151 | { |
5152 | u32 config; |
5153 | |
5154 | if (!box->io_addr) |
5155 | return; |
5156 | |
	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
5160 | } |
5161 | |
5162 | static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box, |
5163 | struct perf_event *event) |
5164 | { |
5165 | struct hw_perf_event *hwc = &event->hw; |
5166 | |
5167 | if (!box->io_addr) |
5168 | return; |
5169 | |
	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5171 | return; |
5172 | |
	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
5175 | } |
5176 | |
5177 | static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box, |
5178 | struct perf_event *event) |
5179 | { |
5180 | struct hw_perf_event *hwc = &event->hw; |
5181 | |
5182 | if (!box->io_addr) |
5183 | return; |
5184 | |
	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5186 | return; |
5187 | |
	writel(hwc->config, box->io_addr + hwc->config_base);
5189 | } |
5190 | |
5191 | static struct intel_uncore_ops snr_uncore_mmio_ops = { |
5192 | .init_box = snr_uncore_mmio_init_box, |
5193 | .exit_box = uncore_mmio_exit_box, |
5194 | .disable_box = snr_uncore_mmio_disable_box, |
5195 | .enable_box = snr_uncore_mmio_enable_box, |
5196 | .disable_event = snr_uncore_mmio_disable_event, |
5197 | .enable_event = snr_uncore_mmio_enable_event, |
5198 | .read_counter = uncore_mmio_read_counter, |
5199 | }; |
5200 | |
5201 | static struct uncore_event_desc snr_uncore_imc_events[] = { |
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
5209 | { /* end: all zeroes */ }, |
5210 | }; |
5211 | |
5212 | static struct intel_uncore_type snr_uncore_imc = { |
5213 | .name = "imc" , |
5214 | .num_counters = 4, |
5215 | .num_boxes = 2, |
5216 | .perf_ctr_bits = 48, |
5217 | .fixed_ctr_bits = 48, |
5218 | .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, |
5219 | .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, |
5220 | .event_descs = snr_uncore_imc_events, |
5221 | .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, |
5222 | .event_ctl = SNR_IMC_MMIO_PMON_CTL0, |
5223 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5224 | .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL, |
5225 | .mmio_offset = SNR_IMC_MMIO_OFFSET, |
5226 | .mmio_map_size = SNR_IMC_MMIO_SIZE, |
5227 | .ops = &snr_uncore_mmio_ops, |
5228 | .format_group = &skx_uncore_format_group, |
5229 | }; |
5230 | |
5231 | enum perf_uncore_snr_imc_freerunning_type_id { |
5232 | SNR_IMC_DCLK, |
5233 | SNR_IMC_DDR, |
5234 | |
5235 | SNR_IMC_FREERUNNING_TYPE_MAX, |
5236 | }; |
5237 | |
5238 | static struct freerunning_counters snr_imc_freerunning[] = { |
5239 | [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, |
	[SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5241 | }; |
5242 | |
5243 | static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { |
	INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5252 | { /* end: all zeroes */ }, |
5253 | }; |
5254 | |
5255 | static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = { |
5256 | .init_box = snr_uncore_mmio_init_box, |
5257 | .exit_box = uncore_mmio_exit_box, |
5258 | .read_counter = uncore_mmio_read_counter, |
5259 | .hw_config = uncore_freerunning_hw_config, |
5260 | }; |
5261 | |
5262 | static struct intel_uncore_type snr_uncore_imc_free_running = { |
5263 | .name = "imc_free_running" , |
5264 | .num_counters = 3, |
5265 | .num_boxes = 1, |
5266 | .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX, |
5267 | .mmio_map_size = SNR_IMC_MMIO_SIZE, |
5268 | .freerunning = snr_imc_freerunning, |
5269 | .ops = &snr_uncore_imc_freerunning_ops, |
5270 | .event_descs = snr_uncore_imc_freerunning_events, |
5271 | .format_group = &skx_uncore_iio_freerunning_format_group, |
5272 | }; |
5273 | |
5274 | static struct intel_uncore_type *snr_mmio_uncores[] = { |
5275 | &snr_uncore_imc, |
5276 | &snr_uncore_imc_free_running, |
5277 | NULL, |
5278 | }; |
5279 | |
5280 | void snr_uncore_mmio_init(void) |
5281 | { |
5282 | uncore_mmio_uncores = snr_mmio_uncores; |
5283 | } |
5284 | |
5285 | /* end of SNR uncore support */ |
5286 | |
5287 | /* ICX uncore support */ |
5288 | |
5289 | static u64 icx_cha_msr_offsets[] = { |
5290 | 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, |
5291 | 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, |
5292 | 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, |
5293 | 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe, |
5294 | 0x1c, 0x2a, 0x38, 0x46, |
5295 | }; |
5296 | |
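/*
 * Program the per-CHA TID filter register when tid_en is set; the
 * filter MSRs are not evenly spaced, hence the offset table above.
 */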
5297 | static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
5298 | { |
5299 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
5300 | bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN); |
5301 | |
5302 | if (tie_en) { |
5303 | reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 + |
5304 | icx_cha_msr_offsets[box->pmu->pmu_idx]; |
5305 | reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; |
5306 | reg1->idx = 0; |
5307 | } |
5308 | |
5309 | return 0; |
5310 | } |
5311 | |
5312 | static struct intel_uncore_ops icx_uncore_chabox_ops = { |
5313 | .init_box = ivbep_uncore_msr_init_box, |
5314 | .disable_box = snbep_uncore_msr_disable_box, |
5315 | .enable_box = snbep_uncore_msr_enable_box, |
5316 | .disable_event = snbep_uncore_msr_disable_event, |
5317 | .enable_event = snr_cha_enable_event, |
5318 | .read_counter = uncore_msr_read_counter, |
5319 | .hw_config = icx_cha_hw_config, |
5320 | }; |
5321 | |
5322 | static struct intel_uncore_type icx_uncore_chabox = { |
5323 | .name = "cha" , |
5324 | .num_counters = 4, |
5325 | .perf_ctr_bits = 48, |
5326 | .event_ctl = ICX_C34_MSR_PMON_CTL0, |
5327 | .perf_ctr = ICX_C34_MSR_PMON_CTR0, |
5328 | .box_ctl = ICX_C34_MSR_PMON_BOX_CTL, |
5329 | .msr_offsets = icx_cha_msr_offsets, |
5330 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, |
5331 | .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, |
5332 | .constraints = skx_uncore_chabox_constraints, |
5333 | .ops = &icx_uncore_chabox_ops, |
5334 | .format_group = &snr_uncore_chabox_format_group, |
5335 | }; |
5336 | |
5337 | static u64 icx_msr_offsets[] = { |
5338 | 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, |
5339 | }; |
5340 | |
5341 | static struct event_constraint icx_uncore_iio_constraints[] = { |
5342 | UNCORE_EVENT_CONSTRAINT(0x02, 0x3), |
5343 | UNCORE_EVENT_CONSTRAINT(0x03, 0x3), |
5344 | UNCORE_EVENT_CONSTRAINT(0x83, 0x3), |
5345 | UNCORE_EVENT_CONSTRAINT(0x88, 0xc), |
5346 | UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), |
5347 | UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), |
5348 | UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), |
5349 | EVENT_CONSTRAINT_END |
5350 | }; |
5351 | |
5352 | static umode_t |
5353 | icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) |
5354 | { |
5355 | /* Root bus 0x00 is valid only for pmu_idx = 5. */ |
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
5357 | } |
5358 | |
5359 | static struct attribute_group icx_iio_mapping_group = { |
5360 | .is_visible = icx_iio_mapping_visible, |
5361 | }; |
5362 | |
5363 | static const struct attribute_group *icx_iio_attr_update[] = { |
5364 | &icx_iio_mapping_group, |
5365 | NULL, |
5366 | }; |
5367 | |
5368 | /* |
5369 | * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON |
5370 | */ |
5371 | enum { |
5372 | ICX_PCIE1_PMON_ID, |
5373 | ICX_PCIE2_PMON_ID, |
5374 | ICX_PCIE3_PMON_ID, |
5375 | ICX_PCIE4_PMON_ID, |
5376 | ICX_PCIE5_PMON_ID, |
5377 | ICX_CBDMA_DMI_PMON_ID |
5378 | }; |
5379 | |
5380 | static u8 icx_sad_pmon_mapping[] = { |
5381 | ICX_CBDMA_DMI_PMON_ID, |
5382 | ICX_PCIE1_PMON_ID, |
5383 | ICX_PCIE2_PMON_ID, |
5384 | ICX_PCIE3_PMON_ID, |
5385 | ICX_PCIE4_PMON_ID, |
5386 | ICX_PCIE5_PMON_ID, |
5387 | }; |
5388 | |
5389 | static int icx_iio_get_topology(struct intel_uncore_type *type) |
5390 | { |
	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5392 | } |
5393 | |
5394 | static void icx_iio_set_mapping(struct intel_uncore_type *type) |
5395 | { |
5396 | /* Detect ICX-D system. This case is not supported */ |
5397 | if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { |
		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5399 | return; |
5400 | } |
	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5402 | } |
5403 | |
5404 | static void icx_iio_cleanup_mapping(struct intel_uncore_type *type) |
5405 | { |
	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5407 | } |
5408 | |
5409 | static struct intel_uncore_type icx_uncore_iio = { |
5410 | .name = "iio" , |
5411 | .num_counters = 4, |
5412 | .num_boxes = 6, |
5413 | .perf_ctr_bits = 48, |
5414 | .event_ctl = ICX_IIO_MSR_PMON_CTL0, |
5415 | .perf_ctr = ICX_IIO_MSR_PMON_CTR0, |
5416 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5417 | .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, |
5418 | .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL, |
5419 | .msr_offsets = icx_msr_offsets, |
5420 | .constraints = icx_uncore_iio_constraints, |
5421 | .ops = &skx_uncore_iio_ops, |
5422 | .format_group = &snr_uncore_iio_format_group, |
5423 | .attr_update = icx_iio_attr_update, |
5424 | .get_topology = icx_iio_get_topology, |
5425 | .set_mapping = icx_iio_set_mapping, |
5426 | .cleanup_mapping = icx_iio_cleanup_mapping, |
5427 | }; |
5428 | |
5429 | static struct intel_uncore_type icx_uncore_irp = { |
5430 | .name = "irp" , |
5431 | .num_counters = 2, |
5432 | .num_boxes = 6, |
5433 | .perf_ctr_bits = 48, |
5434 | .event_ctl = ICX_IRP0_MSR_PMON_CTL0, |
5435 | .perf_ctr = ICX_IRP0_MSR_PMON_CTR0, |
5436 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5437 | .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL, |
5438 | .msr_offsets = icx_msr_offsets, |
5439 | .ops = &ivbep_uncore_msr_ops, |
5440 | .format_group = &ivbep_uncore_format_group, |
5441 | }; |
5442 | |
5443 | static struct event_constraint icx_uncore_m2pcie_constraints[] = { |
5444 | UNCORE_EVENT_CONSTRAINT(0x14, 0x3), |
5445 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), |
5446 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), |
5447 | EVENT_CONSTRAINT_END |
5448 | }; |
5449 | |
5450 | static struct intel_uncore_type icx_uncore_m2pcie = { |
5451 | .name = "m2pcie" , |
5452 | .num_counters = 4, |
5453 | .num_boxes = 6, |
5454 | .perf_ctr_bits = 48, |
5455 | .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0, |
5456 | .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0, |
5457 | .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL, |
5458 | .msr_offsets = icx_msr_offsets, |
5459 | .constraints = icx_uncore_m2pcie_constraints, |
5460 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5461 | .ops = &ivbep_uncore_msr_ops, |
5462 | .format_group = &ivbep_uncore_format_group, |
5463 | }; |
5464 | |
5465 | enum perf_uncore_icx_iio_freerunning_type_id { |
5466 | ICX_IIO_MSR_IOCLK, |
5467 | ICX_IIO_MSR_BW_IN, |
5468 | |
5469 | ICX_IIO_FREERUNNING_TYPE_MAX, |
5470 | }; |
5471 | |
5472 | static unsigned icx_iio_clk_freerunning_box_offsets[] = { |
5473 | 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, |
5474 | }; |
5475 | |
5476 | static unsigned icx_iio_bw_freerunning_box_offsets[] = { |
5477 | 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0, |
5478 | }; |
5479 | |
5480 | static struct freerunning_counters icx_iio_freerunning[] = { |
5481 | [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets }, |
	[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5483 | }; |
5484 | |
5485 | static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = { |
5486 | /* Free-Running IIO CLOCKS Counter */ |
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
5513 | { /* end: all zeroes */ }, |
5514 | }; |
5515 | |
5516 | static struct intel_uncore_type icx_uncore_iio_free_running = { |
5517 | .name = "iio_free_running" , |
5518 | .num_counters = 9, |
5519 | .num_boxes = 6, |
5520 | .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX, |
5521 | .freerunning = icx_iio_freerunning, |
5522 | .ops = &skx_uncore_iio_freerunning_ops, |
5523 | .event_descs = icx_uncore_iio_freerunning_events, |
5524 | .format_group = &skx_uncore_iio_freerunning_format_group, |
5525 | }; |
5526 | |
5527 | static struct intel_uncore_type *icx_msr_uncores[] = { |
5528 | &skx_uncore_ubox, |
5529 | &icx_uncore_chabox, |
5530 | &icx_uncore_iio, |
5531 | &icx_uncore_irp, |
5532 | &icx_uncore_m2pcie, |
5533 | &skx_uncore_pcu, |
5534 | &icx_uncore_iio_free_running, |
5535 | NULL, |
5536 | }; |
5537 | |
5538 | /* |
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
 * registers, which are located at Device 30, Function 3.
5541 | */ |
5542 | #define ICX_CAPID6 0x9c |
5543 | #define ICX_CAPID7 0xa0 |
5544 | |
5545 | static u64 icx_count_chabox(void) |
5546 | { |
5547 | struct pci_dev *dev = NULL; |
5548 | u64 caps = 0; |
5549 | |
	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5551 | if (!dev) |
5552 | goto out; |
5553 | |
	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5556 | out: |
5557 | pci_dev_put(dev); |
5558 | return hweight64(caps); |
5559 | } |
5560 | |
5561 | void icx_uncore_cpu_init(void) |
5562 | { |
5563 | u64 num_boxes = icx_count_chabox(); |
5564 | |
5565 | if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets))) |
5566 | return; |
5567 | icx_uncore_chabox.num_boxes = num_boxes; |
5568 | uncore_msr_uncores = icx_msr_uncores; |
5569 | } |
5570 | |
5571 | static struct intel_uncore_type icx_uncore_m2m = { |
5572 | .name = "m2m" , |
5573 | .num_counters = 4, |
5574 | .num_boxes = 4, |
5575 | .perf_ctr_bits = 48, |
5576 | .perf_ctr = SNR_M2M_PCI_PMON_CTR0, |
5577 | .event_ctl = SNR_M2M_PCI_PMON_CTL0, |
5578 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5579 | .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT, |
5580 | .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, |
5581 | .ops = &snr_m2m_uncore_pci_ops, |
5582 | .format_group = &snr_m2m_uncore_format_group, |
5583 | }; |
5584 | |
5585 | static struct attribute *icx_upi_uncore_formats_attr[] = { |
5586 | &format_attr_event.attr, |
5587 | &format_attr_umask_ext4.attr, |
5588 | &format_attr_edge.attr, |
5589 | &format_attr_inv.attr, |
5590 | &format_attr_thresh8.attr, |
5591 | NULL, |
5592 | }; |
5593 | |
5594 | static const struct attribute_group icx_upi_uncore_format_group = { |
5595 | .name = "format" , |
5596 | .attrs = icx_upi_uncore_formats_attr, |
5597 | }; |
5598 | |
5599 | #define ICX_UPI_REGS_ADDR_DEVICE_LINK0 0x02 |
5600 | #define ICX_UPI_REGS_ADDR_FUNCTION 0x01 |
5601 | |
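/*
 * Walk the UBOX device of every socket, translate its NodeID/GroupID
 * pair into a logical package, then probe the UPI link devices on the
 * same bus to fill in the per-link topology.
 */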
5602 | static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0) |
5603 | { |
5604 | struct pci_dev *ubox = NULL; |
5605 | struct pci_dev *dev = NULL; |
5606 | u32 nid, gid; |
5607 | int idx, lgc_pkg, ret = -EPERM; |
5608 | struct intel_uncore_topology *upi; |
5609 | unsigned int devfn; |
5610 | |
	/* The GIDNIDMAP method supports machines with at most 8 sockets. */
5612 | if (uncore_max_dies() > 8) |
5613 | goto err; |
5614 | |
	while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
		ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5617 | if (ret) { |
			ret = pcibios_err_to_errno(ret);
5619 | break; |
5620 | } |
5621 | |
		lgc_pkg = topology_gidnid_map(nid, gid);
5623 | if (lgc_pkg < 0) { |
5624 | ret = -EPERM; |
5625 | goto err; |
5626 | } |
5627 | for (idx = 0; idx < type->num_boxes; idx++) { |
5628 | upi = &type->topology[lgc_pkg][idx]; |
5629 | devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION); |
			dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
							  ubox->bus->number,
							  devfn);
5633 | if (dev) { |
				ret = upi_fill_topology(dev, upi, idx);
5635 | if (ret) |
5636 | goto err; |
5637 | } |
5638 | } |
5639 | } |
5640 | err: |
	pci_dev_put(ubox);
	pci_dev_put(dev);
5643 | return ret; |
5644 | } |
5645 | |
5646 | static int icx_upi_get_topology(struct intel_uncore_type *type) |
5647 | { |
5648 | return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0); |
5649 | } |
5650 | |
5651 | static struct attribute_group icx_upi_mapping_group = { |
5652 | .is_visible = skx_upi_mapping_visible, |
5653 | }; |
5654 | |
5655 | static const struct attribute_group *icx_upi_attr_update[] = { |
5656 | &icx_upi_mapping_group, |
5657 | NULL |
5658 | }; |
5659 | |
5660 | static void icx_upi_set_mapping(struct intel_uncore_type *type) |
5661 | { |
	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5663 | } |
5664 | |
5665 | static void icx_upi_cleanup_mapping(struct intel_uncore_type *type) |
5666 | { |
	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5668 | } |
5669 | |
5670 | static struct intel_uncore_type icx_uncore_upi = { |
5671 | .name = "upi" , |
5672 | .num_counters = 4, |
5673 | .num_boxes = 3, |
5674 | .perf_ctr_bits = 48, |
5675 | .perf_ctr = ICX_UPI_PCI_PMON_CTR0, |
5676 | .event_ctl = ICX_UPI_PCI_PMON_CTL0, |
5677 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5678 | .event_mask_ext = ICX_UPI_CTL_UMASK_EXT, |
5679 | .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, |
5680 | .ops = &skx_upi_uncore_pci_ops, |
5681 | .format_group = &icx_upi_uncore_format_group, |
5682 | .attr_update = icx_upi_attr_update, |
5683 | .get_topology = icx_upi_get_topology, |
5684 | .set_mapping = icx_upi_set_mapping, |
5685 | .cleanup_mapping = icx_upi_cleanup_mapping, |
5686 | }; |
5687 | |
5688 | static struct event_constraint icx_uncore_m3upi_constraints[] = { |
5689 | UNCORE_EVENT_CONSTRAINT(0x1c, 0x1), |
5690 | UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), |
5691 | UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), |
5692 | UNCORE_EVENT_CONSTRAINT(0x1f, 0x1), |
5693 | UNCORE_EVENT_CONSTRAINT(0x40, 0x7), |
5694 | UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), |
5695 | UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), |
5696 | UNCORE_EVENT_CONSTRAINT(0x50, 0x7), |
5697 | EVENT_CONSTRAINT_END |
5698 | }; |
5699 | |
5700 | static struct intel_uncore_type icx_uncore_m3upi = { |
5701 | .name = "m3upi" , |
5702 | .num_counters = 4, |
5703 | .num_boxes = 3, |
5704 | .perf_ctr_bits = 48, |
5705 | .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0, |
5706 | .event_ctl = ICX_M3UPI_PCI_PMON_CTL0, |
5707 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5708 | .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL, |
5709 | .constraints = icx_uncore_m3upi_constraints, |
5710 | .ops = &ivbep_uncore_pci_ops, |
5711 | .format_group = &skx_uncore_format_group, |
5712 | }; |
5713 | |
5714 | enum { |
5715 | ICX_PCI_UNCORE_M2M, |
5716 | ICX_PCI_UNCORE_UPI, |
5717 | ICX_PCI_UNCORE_M3UPI, |
5718 | }; |
5719 | |
5720 | static struct intel_uncore_type *icx_pci_uncores[] = { |
5721 | [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m, |
5722 | [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi, |
5723 | [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi, |
5724 | NULL, |
5725 | }; |
5726 | |
5727 | static const struct pci_device_id icx_uncore_pci_ids[] = { |
5728 | { /* M2M 0 */ |
5729 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), |
5730 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0), |
5731 | }, |
5732 | { /* M2M 1 */ |
5733 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), |
5734 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1), |
5735 | }, |
5736 | { /* M2M 2 */ |
5737 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), |
5738 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2), |
5739 | }, |
5740 | { /* M2M 3 */ |
5741 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), |
5742 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3), |
5743 | }, |
5744 | { /* UPI Link 0 */ |
5745 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), |
5746 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0), |
5747 | }, |
5748 | { /* UPI Link 1 */ |
5749 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), |
5750 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1), |
5751 | }, |
5752 | { /* UPI Link 2 */ |
5753 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), |
5754 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2), |
5755 | }, |
5756 | { /* M3UPI Link 0 */ |
5757 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), |
5758 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0), |
5759 | }, |
5760 | { /* M3UPI Link 1 */ |
5761 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), |
5762 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1), |
5763 | }, |
5764 | { /* M3UPI Link 2 */ |
5765 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), |
5766 | .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2), |
5767 | }, |
5768 | { /* end: all zeroes */ } |
5769 | }; |
5770 | |
5771 | static struct pci_driver icx_uncore_pci_driver = { |
5772 | .name = "icx_uncore" , |
5773 | .id_table = icx_uncore_pci_ids, |
5774 | }; |
5775 | |
5776 | int icx_uncore_pci_init(void) |
5777 | { |
5778 | /* ICX UBOX DID */ |
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);
5781 | |
5782 | if (ret) |
5783 | return ret; |
5784 | |
5785 | uncore_pci_uncores = icx_pci_uncores; |
5786 | uncore_pci_driver = &icx_uncore_pci_driver; |
5787 | return 0; |
5788 | } |
5789 | |
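/*
 * ICX IMC boxes are grouped as ICX_NUMBER_IMC_CHN channels per memory
 * controller: the channel index selects the box control offset and the
 * controller index selects the membar offset.
 */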
5790 | static void icx_uncore_imc_init_box(struct intel_uncore_box *box) |
5791 | { |
5792 | unsigned int box_ctl = box->pmu->type->box_ctl + |
5793 | box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN); |
5794 | int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE + |
5795 | SNR_IMC_MMIO_MEM0_OFFSET; |
5796 | |
5797 | __snr_uncore_mmio_init_box(box, box_ctl, mem_offset, |
5798 | SNR_MC_DEVICE_ID); |
5799 | } |
5800 | |
5801 | static struct intel_uncore_ops icx_uncore_mmio_ops = { |
5802 | .init_box = icx_uncore_imc_init_box, |
5803 | .exit_box = uncore_mmio_exit_box, |
5804 | .disable_box = snr_uncore_mmio_disable_box, |
5805 | .enable_box = snr_uncore_mmio_enable_box, |
5806 | .disable_event = snr_uncore_mmio_disable_event, |
5807 | .enable_event = snr_uncore_mmio_enable_event, |
5808 | .read_counter = uncore_mmio_read_counter, |
5809 | }; |
5810 | |
5811 | static struct intel_uncore_type icx_uncore_imc = { |
5812 | .name = "imc" , |
5813 | .num_counters = 4, |
5814 | .num_boxes = 12, |
5815 | .perf_ctr_bits = 48, |
5816 | .fixed_ctr_bits = 48, |
5817 | .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, |
5818 | .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, |
5819 | .event_descs = snr_uncore_imc_events, |
5820 | .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, |
5821 | .event_ctl = SNR_IMC_MMIO_PMON_CTL0, |
5822 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
5823 | .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL, |
5824 | .mmio_offset = SNR_IMC_MMIO_OFFSET, |
5825 | .mmio_map_size = SNR_IMC_MMIO_SIZE, |
5826 | .ops = &icx_uncore_mmio_ops, |
5827 | .format_group = &skx_uncore_format_group, |
5828 | }; |
5829 | |
5830 | enum perf_uncore_icx_imc_freerunning_type_id { |
5831 | ICX_IMC_DCLK, |
5832 | ICX_IMC_DDR, |
5833 | ICX_IMC_DDRT, |
5834 | |
5835 | ICX_IMC_FREERUNNING_TYPE_MAX, |
5836 | }; |
5837 | |
5838 | static struct freerunning_counters icx_imc_freerunning[] = { |
5839 | [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, |
	[ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
5842 | }; |
5843 | |
5844 | static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = { |
	INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5860 | { /* end: all zeroes */ }, |
5861 | }; |
5862 | |
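/*
 * The free-running IMC counters expose one box per memory controller,
 * so pmu_idx indexes the controllers directly.
 */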
5863 | static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) |
5864 | { |
5865 | int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + |
5866 | SNR_IMC_MMIO_MEM0_OFFSET; |
5867 | |
	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5869 | mem_offset, SNR_MC_DEVICE_ID); |
5870 | } |
5871 | |
5872 | static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = { |
5873 | .init_box = icx_uncore_imc_freerunning_init_box, |
5874 | .exit_box = uncore_mmio_exit_box, |
5875 | .read_counter = uncore_mmio_read_counter, |
5876 | .hw_config = uncore_freerunning_hw_config, |
5877 | }; |
5878 | |
5879 | static struct intel_uncore_type icx_uncore_imc_free_running = { |
5880 | .name = "imc_free_running" , |
5881 | .num_counters = 5, |
5882 | .num_boxes = 4, |
5883 | .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX, |
5884 | .mmio_map_size = SNR_IMC_MMIO_SIZE, |
5885 | .freerunning = icx_imc_freerunning, |
5886 | .ops = &icx_uncore_imc_freerunning_ops, |
5887 | .event_descs = icx_uncore_imc_freerunning_events, |
5888 | .format_group = &skx_uncore_iio_freerunning_format_group, |
5889 | }; |
5890 | |
5891 | static struct intel_uncore_type *icx_mmio_uncores[] = { |
5892 | &icx_uncore_imc, |
5893 | &icx_uncore_imc_free_running, |
5894 | NULL, |
5895 | }; |
5896 | |
5897 | void icx_uncore_mmio_init(void) |
5898 | { |
5899 | uncore_mmio_uncores = icx_mmio_uncores; |
5900 | } |
5901 | |
5902 | /* end of ICX uncore support */ |
5903 | |
5904 | /* SPR uncore support */ |
5905 | |
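/*
 * SPR MSR PMON: write the extra filter register when one is attached
 * to the event, then write the event control word itself.
 */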
5906 | static void spr_uncore_msr_enable_event(struct intel_uncore_box *box, |
5907 | struct perf_event *event) |
5908 | { |
5909 | struct hw_perf_event *hwc = &event->hw; |
5910 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
5911 | |
5912 | if (reg1->idx != EXTRA_REG_NONE) |
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config);
5916 | } |
5917 | |
5918 | static void spr_uncore_msr_disable_event(struct intel_uncore_box *box, |
5919 | struct perf_event *event) |
5920 | { |
5921 | struct hw_perf_event *hwc = &event->hw; |
5922 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
5923 | |
5924 | if (reg1->idx != EXTRA_REG_NONE) |
		wrmsrl(reg1->reg, 0);

	wrmsrl(hwc->config_base, 0);
5928 | } |
5929 | |
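/*
 * SPR CHA TID filtering: the filter MSRs are spaced by
 * HSWEP_CBO_MSR_OFFSET and indexed by the box ID discovered at probe
 * time rather than by pmu_idx.
 */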
5930 | static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
5931 | { |
5932 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
5933 | bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN); |
5934 | struct intel_uncore_type *type = box->pmu->type; |
5935 | |
5936 | if (tie_en) { |
5937 | reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 + |
5938 | HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx]; |
5939 | reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID; |
5940 | reg1->idx = 0; |
5941 | } |
5942 | |
5943 | return 0; |
5944 | } |
5945 | |
5946 | static struct intel_uncore_ops spr_uncore_chabox_ops = { |
5947 | .init_box = intel_generic_uncore_msr_init_box, |
5948 | .disable_box = intel_generic_uncore_msr_disable_box, |
5949 | .enable_box = intel_generic_uncore_msr_enable_box, |
5950 | .disable_event = spr_uncore_msr_disable_event, |
5951 | .enable_event = spr_uncore_msr_enable_event, |
5952 | .read_counter = uncore_msr_read_counter, |
5953 | .hw_config = spr_cha_hw_config, |
5954 | .get_constraint = uncore_get_constraint, |
5955 | .put_constraint = uncore_put_constraint, |
5956 | }; |
5957 | |
5958 | static struct attribute *spr_uncore_cha_formats_attr[] = { |
5959 | &format_attr_event.attr, |
5960 | &format_attr_umask_ext4.attr, |
5961 | &format_attr_tid_en2.attr, |
5962 | &format_attr_edge.attr, |
5963 | &format_attr_inv.attr, |
5964 | &format_attr_thresh8.attr, |
5965 | &format_attr_filter_tid5.attr, |
5966 | NULL, |
5967 | }; |
5968 | static const struct attribute_group spr_uncore_chabox_format_group = { |
5969 | .name = "format" , |
5970 | .attrs = spr_uncore_cha_formats_attr, |
5971 | }; |
5972 | |
5973 | static ssize_t alias_show(struct device *dev, |
5974 | struct device_attribute *attr, |
5975 | char *buf) |
5976 | { |
5977 | struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); |
5978 | char pmu_name[UNCORE_PMU_NAME_LEN]; |
5979 | |
5980 | uncore_get_alias_name(pmu_name, pmu); |
	return sysfs_emit(buf, "%s\n", pmu_name);
5982 | } |
5983 | |
5984 | static DEVICE_ATTR_RO(alias); |
5985 | |
5986 | static struct attribute *uncore_alias_attrs[] = { |
5987 | &dev_attr_alias.attr, |
5988 | NULL |
5989 | }; |
5990 | |
5991 | ATTRIBUTE_GROUPS(uncore_alias); |
5992 | |
5993 | static struct intel_uncore_type spr_uncore_chabox = { |
5994 | .name = "cha" , |
5995 | .event_mask = SPR_CHA_PMON_EVENT_MASK, |
5996 | .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, |
5997 | .num_shared_regs = 1, |
5998 | .constraints = skx_uncore_chabox_constraints, |
5999 | .ops = &spr_uncore_chabox_ops, |
6000 | .format_group = &spr_uncore_chabox_format_group, |
6001 | .attr_update = uncore_alias_groups, |
6002 | }; |
6003 | |
6004 | static struct intel_uncore_type spr_uncore_iio = { |
6005 | .name = "iio" , |
6006 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
6007 | .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, |
6008 | .format_group = &snr_uncore_iio_format_group, |
6009 | .attr_update = uncore_alias_groups, |
6010 | .constraints = icx_uncore_iio_constraints, |
6011 | }; |
6012 | |
6013 | static struct attribute *spr_uncore_raw_formats_attr[] = { |
6014 | &format_attr_event.attr, |
6015 | &format_attr_umask_ext4.attr, |
6016 | &format_attr_edge.attr, |
6017 | &format_attr_inv.attr, |
6018 | &format_attr_thresh8.attr, |
6019 | NULL, |
6020 | }; |
6021 | |
6022 | static const struct attribute_group spr_uncore_raw_format_group = { |
6023 | .name = "format" , |
6024 | .attrs = spr_uncore_raw_formats_attr, |
6025 | }; |
6026 | |
6027 | #define SPR_UNCORE_COMMON_FORMAT() \ |
6028 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ |
6029 | .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, \ |
6030 | .format_group = &spr_uncore_raw_format_group, \ |
6031 | .attr_update = uncore_alias_groups |
6032 | |
6033 | static struct intel_uncore_type spr_uncore_irp = { |
6034 | SPR_UNCORE_COMMON_FORMAT(), |
6035 | .name = "irp" , |
6036 | |
6037 | }; |
6038 | |
6039 | static struct event_constraint spr_uncore_m2pcie_constraints[] = { |
6040 | UNCORE_EVENT_CONSTRAINT(0x14, 0x3), |
6041 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), |
6042 | EVENT_CONSTRAINT_END |
6043 | }; |
6044 | |
6045 | static struct intel_uncore_type spr_uncore_m2pcie = { |
6046 | SPR_UNCORE_COMMON_FORMAT(), |
6047 | .name = "m2pcie" , |
6048 | .constraints = spr_uncore_m2pcie_constraints, |
6049 | }; |
6050 | |
6051 | static struct intel_uncore_type spr_uncore_pcu = { |
6052 | .name = "pcu" , |
6053 | .attr_update = uncore_alias_groups, |
6054 | }; |
6055 | |
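/*
 * Fixed MMIO counters only take the enable bit in their control
 * register; general-purpose counters take the full event encoding.
 */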
6056 | static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box, |
6057 | struct perf_event *event) |
6058 | { |
6059 | struct hw_perf_event *hwc = &event->hw; |
6060 | |
6061 | if (!box->io_addr) |
6062 | return; |
6063 | |
	if (uncore_pmc_fixed(hwc->idx))
		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
	else
		writel(hwc->config, box->io_addr + hwc->config_base);
6068 | } |
6069 | |
6070 | static struct intel_uncore_ops spr_uncore_mmio_ops = { |
6071 | .init_box = intel_generic_uncore_mmio_init_box, |
6072 | .exit_box = uncore_mmio_exit_box, |
6073 | .disable_box = intel_generic_uncore_mmio_disable_box, |
6074 | .enable_box = intel_generic_uncore_mmio_enable_box, |
6075 | .disable_event = intel_generic_uncore_mmio_disable_event, |
6076 | .enable_event = spr_uncore_mmio_enable_event, |
6077 | .read_counter = uncore_mmio_read_counter, |
6078 | }; |
6079 | |
6080 | static struct uncore_event_desc spr_uncore_imc_events[] = { |
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
6088 | { /* end: all zeroes */ }, |
6089 | }; |
6090 | |
6091 | #define SPR_UNCORE_MMIO_COMMON_FORMAT() \ |
6092 | SPR_UNCORE_COMMON_FORMAT(), \ |
6093 | .ops = &spr_uncore_mmio_ops |
6094 | |
6095 | static struct intel_uncore_type spr_uncore_imc = { |
6096 | SPR_UNCORE_MMIO_COMMON_FORMAT(), |
6097 | .name = "imc" , |
6098 | .fixed_ctr_bits = 48, |
6099 | .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, |
6100 | .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, |
6101 | .event_descs = spr_uncore_imc_events, |
6102 | }; |
6103 | |
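/*
 * The event control register is 64 bits wide, but PCI config space is
 * written one dword at a time: the upper dword goes first, so the lower
 * dword carrying the enable bit is written last.
 */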
6104 | static void spr_uncore_pci_enable_event(struct intel_uncore_box *box, |
6105 | struct perf_event *event) |
6106 | { |
6107 | struct pci_dev *pdev = box->pci_dev; |
6108 | struct hw_perf_event *hwc = &event->hw; |
6109 | |
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
6112 | } |
6113 | |
6114 | static struct intel_uncore_ops spr_uncore_pci_ops = { |
6115 | .init_box = intel_generic_uncore_pci_init_box, |
6116 | .disable_box = intel_generic_uncore_pci_disable_box, |
6117 | .enable_box = intel_generic_uncore_pci_enable_box, |
6118 | .disable_event = intel_generic_uncore_pci_disable_event, |
6119 | .enable_event = spr_uncore_pci_enable_event, |
6120 | .read_counter = intel_generic_uncore_pci_read_counter, |
6121 | }; |
6122 | |
6123 | #define SPR_UNCORE_PCI_COMMON_FORMAT() \ |
6124 | SPR_UNCORE_COMMON_FORMAT(), \ |
6125 | .ops = &spr_uncore_pci_ops |
6126 | |
6127 | static struct intel_uncore_type spr_uncore_m2m = { |
6128 | SPR_UNCORE_PCI_COMMON_FORMAT(), |
6129 | .name = "m2m" , |
6130 | }; |
6131 | |
6132 | static struct attribute_group spr_upi_mapping_group = { |
6133 | .is_visible = skx_upi_mapping_visible, |
6134 | }; |
6135 | |
6136 | static const struct attribute_group *spr_upi_attr_update[] = { |
6137 | &uncore_alias_group, |
6138 | &spr_upi_mapping_group, |
6139 | NULL |
6140 | }; |
6141 | |
6142 | #define SPR_UPI_REGS_ADDR_DEVICE_LINK0 0x01 |
6143 | |
6144 | static void spr_upi_set_mapping(struct intel_uncore_type *type) |
6145 | { |
	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
6147 | } |
6148 | |
6149 | static void spr_upi_cleanup_mapping(struct intel_uncore_type *type) |
6150 | { |
	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
6152 | } |
6153 | |
6154 | static int spr_upi_get_topology(struct intel_uncore_type *type) |
6155 | { |
6156 | return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0); |
6157 | } |
6158 | |
6159 | static struct intel_uncore_type spr_uncore_mdf = { |
6160 | SPR_UNCORE_COMMON_FORMAT(), |
6161 | .name = "mdf" , |
6162 | }; |
6163 | |
6164 | #define UNCORE_SPR_NUM_UNCORE_TYPES 12 |
6165 | #define UNCORE_SPR_CHA 0 |
6166 | #define UNCORE_SPR_IIO 1 |
6167 | #define UNCORE_SPR_IMC 6 |
6168 | #define UNCORE_SPR_UPI 8 |
6169 | #define UNCORE_SPR_M3UPI 9 |
6170 | |
6171 | /* |
6172 | * The uncore units, which are supported by the discovery table, |
6173 | * are defined here. |
6174 | */ |
6175 | static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { |
6176 | &spr_uncore_chabox, |
6177 | &spr_uncore_iio, |
6178 | &spr_uncore_irp, |
6179 | &spr_uncore_m2pcie, |
6180 | &spr_uncore_pcu, |
6181 | NULL, |
6182 | &spr_uncore_imc, |
6183 | &spr_uncore_m2m, |
6184 | NULL, |
6185 | NULL, |
6186 | NULL, |
6187 | &spr_uncore_mdf, |
6188 | }; |
6189 | |
6190 | /* |
6191 | * The uncore units, which are not supported by the discovery table, |
6192 | * are implemented from here. |
6193 | */ |
6194 | #define SPR_UNCORE_UPI_NUM_BOXES 4 |
6195 | |
6196 | static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { |
6197 | 0, 0x8000, 0x10000, 0x18000 |
6198 | }; |
6199 | |
6200 | static struct intel_uncore_type spr_uncore_upi = { |
6201 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, |
6202 | .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, |
6203 | .format_group = &spr_uncore_raw_format_group, |
6204 | .ops = &spr_uncore_pci_ops, |
6205 | .name = "upi" , |
6206 | .attr_update = spr_upi_attr_update, |
6207 | .get_topology = spr_upi_get_topology, |
6208 | .set_mapping = spr_upi_set_mapping, |
6209 | .cleanup_mapping = spr_upi_cleanup_mapping, |
6210 | .type_id = UNCORE_SPR_UPI, |
6211 | .num_counters = 4, |
6212 | .num_boxes = SPR_UNCORE_UPI_NUM_BOXES, |
6213 | .perf_ctr_bits = 48, |
6214 | .perf_ctr = ICX_UPI_PCI_PMON_CTR0, |
6215 | .event_ctl = ICX_UPI_PCI_PMON_CTL0, |
6216 | .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, |
6217 | .pci_offsets = spr_upi_pci_offsets, |
6218 | }; |
6219 | |
6220 | static struct intel_uncore_type spr_uncore_m3upi = { |
6221 | SPR_UNCORE_PCI_COMMON_FORMAT(), |
6222 | .name = "m3upi" , |
6223 | .type_id = UNCORE_SPR_M3UPI, |
6224 | .num_counters = 4, |
6225 | .num_boxes = SPR_UNCORE_UPI_NUM_BOXES, |
6226 | .perf_ctr_bits = 48, |
6227 | .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0, |
6228 | .event_ctl = ICX_M3UPI_PCI_PMON_CTL0, |
6229 | .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL, |
6230 | .pci_offsets = spr_upi_pci_offsets, |
6231 | .constraints = icx_uncore_m3upi_constraints, |
6232 | }; |
6233 | |
6234 | enum perf_uncore_spr_iio_freerunning_type_id { |
6235 | SPR_IIO_MSR_IOCLK, |
6236 | SPR_IIO_MSR_BW_IN, |
6237 | SPR_IIO_MSR_BW_OUT, |
6238 | |
6239 | SPR_IIO_FREERUNNING_TYPE_MAX, |
6240 | }; |
6241 | |
6242 | static struct freerunning_counters spr_iio_freerunning[] = { |
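	/* { counter_base, counter_offset, box_offset, num_counters, bits } */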
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
6246 | }; |
6247 | |
6248 | static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = { |
6249 | /* Free-Running IIO CLOCKS Counter */ |
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
6301 | { /* end: all zeroes */ }, |
6302 | }; |
6303 | |
6304 | static struct intel_uncore_type spr_uncore_iio_free_running = { |
6305 | .name = "iio_free_running" , |
6306 | .num_counters = 17, |
6307 | .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX, |
6308 | .freerunning = spr_iio_freerunning, |
6309 | .ops = &skx_uncore_iio_freerunning_ops, |
6310 | .event_descs = spr_uncore_iio_freerunning_events, |
6311 | .format_group = &skx_uncore_iio_freerunning_format_group, |
6312 | }; |
6313 | |
6314 | enum perf_uncore_spr_imc_freerunning_type_id { |
6315 | SPR_IMC_DCLK, |
6316 | SPR_IMC_PQ_CYCLES, |
6317 | |
6318 | SPR_IMC_FREERUNNING_TYPE_MAX, |
6319 | }; |
6320 | |
6321 | static struct freerunning_counters spr_imc_freerunning[] = { |
6322 | [SPR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, |
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
6324 | }; |
6325 | |
6326 | static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = { |
	INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles, "event=0xff,umask=0x21"),
6331 | { /* end: all zeroes */ }, |
6332 | }; |
6333 | |
6334 | #define SPR_MC_DEVICE_ID 0x3251 |
6335 | |
6336 | static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) |
6337 | { |
6338 | int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET; |
6339 | |
	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
6342 | } |
6343 | |
6344 | static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = { |
6345 | .init_box = spr_uncore_imc_freerunning_init_box, |
6346 | .exit_box = uncore_mmio_exit_box, |
6347 | .read_counter = uncore_mmio_read_counter, |
6348 | .hw_config = uncore_freerunning_hw_config, |
6349 | }; |
6350 | |
6351 | static struct intel_uncore_type spr_uncore_imc_free_running = { |
6352 | .name = "imc_free_running" , |
6353 | .num_counters = 3, |
6354 | .mmio_map_size = SNR_IMC_MMIO_SIZE, |
6355 | .num_freerunning_types = SPR_IMC_FREERUNNING_TYPE_MAX, |
6356 | .freerunning = spr_imc_freerunning, |
6357 | .ops = &spr_uncore_imc_freerunning_ops, |
6358 | .event_descs = spr_uncore_imc_freerunning_events, |
6359 | .format_group = &skx_uncore_iio_freerunning_format_group, |
6360 | }; |
6361 | |
#define UNCORE_SPR_MSR_EXTRA_UNCORES	1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES	1
#define UNCORE_SPR_PCI_EXTRA_UNCORES	2
6365 | |
6366 | static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = { |
6367 | &spr_uncore_iio_free_running, |
6368 | }; |
6369 | |
6370 | static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = { |
6371 | &spr_uncore_imc_free_running, |
6372 | }; |
6373 | |
6374 | static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = { |
6375 | &spr_uncore_upi, |
6376 | &spr_uncore_m3upi |
6377 | }; |
6378 | |
6379 | int spr_uncore_units_ignore[] = { |
6380 | UNCORE_SPR_UPI, |
6381 | UNCORE_SPR_M3UPI, |
6382 | UNCORE_IGNORE_END |
6383 | }; |
6384 | |
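/*
 * Overlay the hand-written attributes of an uncore type on top of what
 * the discovery table reported. Only the fields a customized definition
 * actually sets are copied; everything else keeps the discovered value.
 */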
6385 | static void uncore_type_customized_copy(struct intel_uncore_type *to_type, |
6386 | struct intel_uncore_type *from_type) |
6387 | { |
6388 | if (!to_type || !from_type) |
6389 | return; |
6390 | |
6391 | if (from_type->name) |
6392 | to_type->name = from_type->name; |
6393 | if (from_type->fixed_ctr_bits) |
6394 | to_type->fixed_ctr_bits = from_type->fixed_ctr_bits; |
6395 | if (from_type->event_mask) |
6396 | to_type->event_mask = from_type->event_mask; |
6397 | if (from_type->event_mask_ext) |
6398 | to_type->event_mask_ext = from_type->event_mask_ext; |
6399 | if (from_type->fixed_ctr) |
6400 | to_type->fixed_ctr = from_type->fixed_ctr; |
6401 | if (from_type->fixed_ctl) |
6402 | to_type->fixed_ctl = from_type->fixed_ctl; |
6405 | if (from_type->num_shared_regs) |
6406 | to_type->num_shared_regs = from_type->num_shared_regs; |
6407 | if (from_type->constraints) |
6408 | to_type->constraints = from_type->constraints; |
6409 | if (from_type->ops) |
6410 | to_type->ops = from_type->ops; |
6411 | if (from_type->event_descs) |
6412 | to_type->event_descs = from_type->event_descs; |
6413 | if (from_type->format_group) |
6414 | to_type->format_group = from_type->format_group; |
6415 | if (from_type->attr_update) |
6416 | to_type->attr_update = from_type->attr_update; |
6417 | if (from_type->set_mapping) |
6418 | to_type->set_mapping = from_type->set_mapping; |
6419 | if (from_type->get_topology) |
6420 | to_type->get_topology = from_type->get_topology; |
6421 | if (from_type->cleanup_mapping) |
6422 | to_type->cleanup_mapping = from_type->cleanup_mapping; |
6423 | } |
6424 | |
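/*
 * Build the final uncore list: start from the discovery-table types,
 * overlay the customized definitions indexed by type_id, then append
 * the "extra" types (e.g. free-running counters) that the discovery
 * table does not describe.
 */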
6425 | static struct intel_uncore_type ** |
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra, int max_num_types,
		   struct intel_uncore_type **uncores)
6429 | { |
6430 | struct intel_uncore_type **types, **start_types; |
6431 | int i; |
6432 | |
6433 | start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra); |
6434 | |
6435 | /* Only copy the customized features */ |
6436 | for (; *types; types++) { |
6437 | if ((*types)->type_id >= max_num_types) |
6438 | continue; |
		uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
6440 | } |
6441 | |
6442 | for (i = 0; i < num_extra; i++, types++) |
6443 | *types = extra[i]; |
6444 | |
6445 | return start_types; |
6446 | } |
6447 | |
6448 | static struct intel_uncore_type * |
6449 | uncore_find_type_by_id(struct intel_uncore_type **types, int type_id) |
6450 | { |
6451 | for (; *types; types++) { |
6452 | if (type_id == (*types)->type_id) |
6453 | return *types; |
6454 | } |
6455 | |
6456 | return NULL; |
6457 | } |
6458 | |
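/*
 * Box IDs from the discovery table may be sparse, so the number of
 * boxes to account for is the highest ID seen plus one, not the raw
 * num_boxes count.
 */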
6459 | static int uncore_type_max_boxes(struct intel_uncore_type **types, |
6460 | int type_id) |
6461 | { |
6462 | struct intel_uncore_type *type; |
6463 | int i, max = 0; |
6464 | |
6465 | type = uncore_find_type_by_id(types, type_id); |
6466 | if (!type) |
6467 | return 0; |
6468 | |
6469 | for (i = 0; i < type->num_boxes; i++) { |
6470 | if (type->box_ids[i] > max) |
6471 | max = type->box_ids[i]; |
6472 | } |
6473 | |
6474 | return max + 1; |
6475 | } |
6476 | |
6477 | #define SPR_MSR_UNC_CBO_CONFIG 0x2FFE |
6478 | |
6479 | void spr_uncore_cpu_init(void) |
6480 | { |
6481 | struct intel_uncore_type *type; |
6482 | u64 num_cbo; |
6483 | |
	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);
6489 | |
	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
6491 | if (type) { |
6492 | /* |
6493 | * The value from the discovery table (stored in the type->num_boxes |
6494 | * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a |
		 * firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG instead.
6496 | */ |
6497 | rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo); |
6498 | /* |
6499 | * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact |
6500 | * the EMR XCC. Don't let the value from the MSR replace the existing value. |
6501 | */ |
6502 | if (num_cbo) |
6503 | type->num_boxes = num_cbo; |
6504 | } |
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6506 | } |
6507 | |
6508 | #define SPR_UNCORE_UPI_PCIID 0x3241 |
6509 | #define SPR_UNCORE_UPI0_DEVFN 0x9 |
6510 | #define SPR_UNCORE_M3UPI_PCIID 0x3246 |
6511 | #define SPR_UNCORE_M3UPI0_DEVFN 0x29 |
6512 | |
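/*
 * Walk the PCI bus for the UPI/M3UPI PMON devices and record, per die,
 * their domain/bus/devfn together with the box control offset in the
 * same packed format the discovery table uses, so the generic PCI
 * uncore code can locate them.
 */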
6513 | static void spr_update_device_location(int type_id) |
6514 | { |
6515 | struct intel_uncore_type *type; |
6516 | struct pci_dev *dev = NULL; |
6517 | u32 device, devfn; |
6518 | u64 *ctls; |
6519 | int die; |
6520 | |
6521 | if (type_id == UNCORE_SPR_UPI) { |
6522 | type = &spr_uncore_upi; |
6523 | device = SPR_UNCORE_UPI_PCIID; |
6524 | devfn = SPR_UNCORE_UPI0_DEVFN; |
6525 | } else if (type_id == UNCORE_SPR_M3UPI) { |
6526 | type = &spr_uncore_m3upi; |
6527 | device = SPR_UNCORE_M3UPI_PCIID; |
6528 | devfn = SPR_UNCORE_M3UPI0_DEVFN; |
6529 | } else |
6530 | return; |
6531 | |
	ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
6533 | if (!ctls) { |
6534 | type->num_boxes = 0; |
6535 | return; |
6536 | } |
6537 | |
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
6539 | if (devfn != dev->devfn) |
6540 | continue; |
6541 | |
6542 | die = uncore_device_to_die(dev); |
6543 | if (die < 0) |
6544 | continue; |
6545 | |
		ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
6547 | dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET | |
6548 | devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET | |
6549 | type->box_ctl; |
6550 | } |
6551 | |
6552 | type->box_ctls = ctls; |
6553 | } |
6554 | |
6555 | int spr_uncore_pci_init(void) |
6556 | { |
6557 | /* |
	 * The discovery table for UPI is broken on some SPR variants,
	 * which prevents detection of both the UPI and M3UPI uncore PMON.
	 * Use the pre-defined UPI and M3UPI tables instead.
6561 | * |
6562 | * The accurate location, e.g., domain and BUS number, |
6563 | * can only be retrieved at load time. |
6564 | * Update the location of UPI and M3UPI. |
6565 | */ |
6566 | spr_update_device_location(UNCORE_SPR_UPI); |
6567 | spr_update_device_location(UNCORE_SPR_M3UPI); |
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
						UNCORE_SPR_PCI_EXTRA_UNCORES,
						spr_pci_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);
6573 | return 0; |
6574 | } |
6575 | |
6576 | void spr_uncore_mmio_init(void) |
6577 | { |
	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6579 | |
6580 | if (ret) { |
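		/*
		 * Without the PCI bus to socket mapping the free-running
		 * IMC counters cannot locate their MC devices, so register
		 * only the units from the discovery table.
		 */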
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
							 UNCORE_SPR_NUM_UNCORE_TYPES,
							 spr_uncores);
6584 | } else { |
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores,
							 UNCORE_SPR_NUM_UNCORE_TYPES,
							 spr_uncores);
6590 | |
		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6592 | } |
6593 | } |
6594 | |
6595 | /* end of SPR uncore support */ |
6596 | |
6597 | /* GNR uncore support */ |
6598 | |
6599 | #define UNCORE_GNR_NUM_UNCORE_TYPES 23 |
6600 | #define UNCORE_GNR_TYPE_15 15 |
6601 | #define UNCORE_GNR_B2UPI 18 |
6602 | #define UNCORE_GNR_TYPE_21 21 |
6603 | #define UNCORE_GNR_TYPE_22 22 |
6604 | |
6605 | int gnr_uncore_units_ignore[] = { |
6606 | UNCORE_SPR_UPI, |
6607 | UNCORE_GNR_TYPE_15, |
6608 | UNCORE_GNR_B2UPI, |
6609 | UNCORE_GNR_TYPE_21, |
6610 | UNCORE_GNR_TYPE_22, |
6611 | UNCORE_IGNORE_END |
6612 | }; |
6613 | |
6614 | static struct intel_uncore_type gnr_uncore_ubox = { |
6615 | .name = "ubox" , |
6616 | .attr_update = uncore_alias_groups, |
6617 | }; |
6618 | |
6619 | static struct intel_uncore_type gnr_uncore_b2cmi = { |
6620 | SPR_UNCORE_PCI_COMMON_FORMAT(), |
6621 | .name = "b2cmi" , |
6622 | }; |
6623 | |
6624 | static struct intel_uncore_type gnr_uncore_b2cxl = { |
6625 | SPR_UNCORE_MMIO_COMMON_FORMAT(), |
6626 | .name = "b2cxl" , |
6627 | }; |
6628 | |
6629 | static struct intel_uncore_type gnr_uncore_mdf_sbo = { |
6630 | .name = "mdf_sbo" , |
6631 | .attr_update = uncore_alias_groups, |
6632 | }; |
6633 | |
6634 | static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { |
6635 | &spr_uncore_chabox, |
6636 | &spr_uncore_iio, |
6637 | &spr_uncore_irp, |
6638 | NULL, |
6639 | &spr_uncore_pcu, |
6640 | &gnr_uncore_ubox, |
6641 | &spr_uncore_imc, |
6642 | NULL, |
6643 | NULL, |
6644 | NULL, |
6645 | NULL, |
6646 | NULL, |
6647 | NULL, |
6648 | NULL, |
6649 | NULL, |
6650 | NULL, |
6651 | &gnr_uncore_b2cmi, |
6652 | &gnr_uncore_b2cxl, |
6653 | NULL, |
6654 | NULL, |
6655 | &gnr_uncore_mdf_sbo, |
6656 | NULL, |
6657 | NULL, |
6658 | }; |
6659 | |
6660 | static struct freerunning_counters gnr_iio_freerunning[] = { |
6661 | [SPR_IIO_MSR_IOCLK] = { 0x290e, 0x01, 0x10, 1, 48 }, |
	[SPR_IIO_MSR_BW_IN]	= { 0x360e, 0x10, 0x80, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x2e0e, 0x10, 0x80, 8, 48 },
6664 | }; |
6665 | |
6666 | void gnr_uncore_cpu_init(void) |
6667 | { |
	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6674 | spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning; |
6675 | } |
6676 | |
6677 | int gnr_uncore_pci_init(void) |
6678 | { |
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
6682 | return 0; |
6683 | } |
6684 | |
6685 | void gnr_uncore_mmio_init(void) |
6686 | { |
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
						 UNCORE_GNR_NUM_UNCORE_TYPES,
						 gnr_uncores);
6690 | } |
6691 | |
6692 | /* end of GNR uncore support */ |
6693 | |