// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-tpmi : Driver to enumerate TPMI features and create devices
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * The TPMI (Topology Aware Register and PM Capsule Interface) provides a
 * flexible, extendable and PCIe enumerable MMIO interface for PM features.
 *
 * For example, Intel RAPL (Running Average Power Limit) provides an MMIO
 * interface using TPMI. This has an advantage over the traditional MSR
 * (Model Specific Register) interface, where a thread needs to be scheduled
 * on the target CPU to read or write. Also, the RAPL features vary between
 * CPU models, which results in a lot of model-specific code. TPMI instead
 * provides an architectural interface with hierarchical tables and fields,
 * which does not need any model-specific implementation.
 *
 * The TPMI interface uses a PCI VSEC structure to expose the location of
 * the MMIO region.
 *
 * This VSEC structure is present in the PCI configuration space of the
 * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC
 * driver. The Intel VSEC driver parses VSEC structures present in the PCI
 * configuration space of the given device and creates an auxiliary device
 * object for each of them. In particular, it creates an auxiliary device
 * object representing TPMI that can be bound by an auxiliary driver.
 *
 * This TPMI driver binds to the TPMI auxiliary device object created
 * by the Intel VSEC driver.
 *
 * The TPMI specification defines a PFS (PM Feature Structure) table.
 * This table is present in the TPMI MMIO region. The starting address
 * of the PFS is derived from the tBIR (BAR Indicator Register) and "Address"
 * fields of the VSEC header.
 *
 * Each TPMI PM feature has one entry in the PFS with a unique TPMI
 * ID and its access details. The TPMI driver creates device nodes
 * for the supported PM features.
 *
 * The names of the devices created by the TPMI driver start with the
 * "intel_vsec.tpmi-" prefix, which is followed by the specific name of the
 * given PM feature (for example, "intel_vsec.tpmi-rapl.0").
 *
 * The device nodes are created using the "intel_vsec_add_aux()" interface
 * provided by the Intel VSEC driver.
 */
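
/*
 * Driver overview (see intel_vsec_tpmi_init() below): at probe time one PFS
 * entry is read for each VSEC resource, the MMIO base of every feature is
 * recorded, the bus/package mapping is taken from the TPMI_INFO entry and
 * the control mailbox base from the TPMI_CONTROL entry. One auxiliary device
 * is then created per supported feature via tpmi_create_devices().
 */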

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/string_helpers.h>

#include "vsec.h"

/**
 * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
 * @tpmi_id: TPMI feature identifier (what the feature is and its data format).
 * @num_entries: Number of feature interface instances present in the PFS.
 *		 This represents the maximum number of Power domains in the SoC.
 * @entry_size: Interface instance entry size in 32-bit words.
 * @cap_offset: Offset from the PM_Features base address to the base of the PM VSEC
 *		register bank in KB.
 * @attribute: Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved.
 * @reserved: Bits for use in the future.
 *
 * Represents one TPMI feature entry data in the PFS retrieved as is
 * from the hardware.
 */
struct intel_tpmi_pfs_entry {
	u64 tpmi_id:8;
	u64 num_entries:8;
	u64 entry_size:16;
	u64 cap_offset:16;
	u64 attribute:2;
	u64 reserved:14;
} __packed;

/**
 * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID
 * @pfs_header: PFS header retrieved from the hardware.
 * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
 *		 this offset = "Address" from VSEC header + PFS Capability
 *		 offset for this feature entry.
 * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
 *
 * Represents TPMI instance information for one TPMI ID.
 */
struct intel_tpmi_pm_feature {
	struct intel_tpmi_pfs_entry pfs_header;
	u64 vsec_offset;
	struct intel_vsec_device *vsec_dev;
};

/**
 * struct intel_tpmi_info - TPMI information for all IDs in an instance
 * @tpmi_features: Pointer to a list of TPMI feature instances
 * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
 * @feature_count: Number of TPMI instances pointed to by tpmi_features
 * @pfs_start: Start of PFS offset for the TPMI instances in this device
 * @plat_info: Stores platform info which can be used by the client drivers
 * @tpmi_control_mem: Memory mapped IO for getting control information
 * @dbgfs_dir: debugfs entry pointer
 *
 * Stores the information for all TPMI devices enumerated from a single PCI device.
 */
struct intel_tpmi_info {
	struct intel_tpmi_pm_feature *tpmi_features;
	struct intel_vsec_device *vsec_dev;
	int feature_count;
	u64 pfs_start;
	struct intel_tpmi_plat_info plat_info;
	void __iomem *tpmi_control_mem;
	struct dentry *dbgfs_dir;
};

/**
 * struct tpmi_info_header - CPU package ID to PCI device mapping information
 * @fn: PCI function number
 * @dev: PCI device number
 * @bus: PCI bus number
 * @pkg: CPU package ID
 * @reserved: Reserved for future use
 * @lock: When set to 1 the register is locked and becomes read-only
 *	  until the next reset. Not for use by the OS driver.
 *
 * The structure to read hardware provided mapping information.
 */
struct tpmi_info_header {
	u64 fn:3;
	u64 dev:5;
	u64 bus:8;
	u64 pkg:8;
	u64 reserved:39;
	u64 lock:1;
} __packed;

/**
 * struct tpmi_feature_state - Structure to read hardware state of a feature
 * @enabled: Enable state of a feature, 1: enabled, 0: disabled
 * @reserved_1: Reserved for future use
 * @write_blocked: Writes are blocked, meaning all write operations are ignored
 * @read_blocked: Reads are blocked, meaning reads will return 0xFFs
 * @pcs_select: Interface used by out-of-band software, not used in the OS
 * @reserved_2: Reserved for future use
 * @id: TPMI ID of the feature
 * @reserved_3: Reserved for future use
 * @locked: When set to 1, the OS can't change this register.
 *
 * The structure is used to read the hardware state of a TPMI feature. This
 * information is used for debug and for restricting operations on this feature.
 */
struct tpmi_feature_state {
	u32 enabled:1;
	u32 reserved_1:3;
	u32 write_blocked:1;
	u32 read_blocked:1;
	u32 pcs_select:1;
	u32 reserved_2:1;
	u32 id:8;
	u32 reserved_3:15;
	u32 locked:1;
} __packed;

/*
 * The size from hardware is in u32 units. The size comes from trusted
 * hardware, but it is better to verify it on pre-silicon platforms.
 * Set size to 0 when invalid.
 */
#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs) \
({ \
	pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2; \
})

/* Used during auxbus device creation */
static DEFINE_IDA(intel_vsec_tpmi_ida);

struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	return vsec_dev->priv_data;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);

int tpmi_get_resource_count(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev)
		return vsec_dev->num_resources;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);

struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev && index < vsec_dev->num_resources)
		return &vsec_dev->resource[index];

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
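
/*
 * Illustrative sketch (not part of this driver): a TPMI feature driver bound
 * to one of the "intel_vsec.tpmi-*" auxiliary devices created below could use
 * the helpers exported above roughly like this in its probe() callback. The
 * names my_feature_probe() and my_map_instance() are hypothetical.
 *
 *	static int my_feature_probe(struct auxiliary_device *auxdev,
 *				    const struct auxiliary_device_id *id)
 *	{
 *		struct intel_tpmi_plat_info *info;
 *		int i, num_resources;
 *
 *		info = tpmi_get_platform_data(auxdev);
 *		if (!info)
 *			return -ENODEV;
 *
 *		num_resources = tpmi_get_resource_count(auxdev);
 *		for (i = 0; i < num_resources; i++) {
 *			struct resource *res;
 *
 *			res = tpmi_get_resource_at_index(auxdev, i);
 *			if (!res)
 *				continue;
 *			my_map_instance(auxdev, res, info->package_id);
 *		}
 *
 *		return 0;
 *	}
 */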

/* TPMI Control Interface */

#define TPMI_CONTROL_STATUS_OFFSET	0x00
#define TPMI_COMMAND_OFFSET		0x08
#define TMPI_CONTROL_DATA_VAL_OFFSET	0x0c

/*
 * The spec calls for a maximum of 1 second to get ownership in the worst
 * case. Poll at 10 ms intervals and repeat for up to 1 second.
 */
#define TPMI_CONTROL_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_CONTROL_TIMEOUT_MAX_US	(1 * USEC_PER_SEC)

#define TPMI_RB_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_RB_TIMEOUT_MAX_US		USEC_PER_SEC

/* TPMI Control status register defines */

#define TPMI_CONTROL_STATUS_RB		BIT_ULL(0)

#define TPMI_CONTROL_STATUS_OWNER	GENMASK_ULL(5, 4)
#define TPMI_OWNER_NONE			0
#define TPMI_OWNER_IN_BAND		1

#define TPMI_CONTROL_STATUS_CPL		BIT_ULL(6)
#define TPMI_CONTROL_STATUS_RESULT	GENMASK_ULL(15, 8)
#define TPMI_CONTROL_STATUS_LEN		GENMASK_ULL(31, 16)

#define TPMI_CMD_PKT_LEN		2
#define TPMI_CMD_STATUS_SUCCESS		0x40

/* TPMI command data registers */
#define TMPI_CONTROL_DATA_CMD		GENMASK_ULL(7, 0)
#define TPMI_CONTROL_DATA_VAL_FEATURE	GENMASK_ULL(48, 40)

/* Command to send via control interface */
#define TPMI_CONTROL_GET_STATE_CMD	0x10

#define TPMI_CONTROL_CMD_MASK		GENMASK_ULL(48, 40)

#define TPMI_CMD_LEN_MASK		GENMASK_ULL(18, 16)

/* Mutex to complete get feature status without interruption */
static DEFINE_MUTEX(tpmi_dev_lock);

static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner)
{
	u64 control;

	return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				  control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control),
				  TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US);
}

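/*
 * Feature-status mailbox sequence used by tpmi_read_feature_status() below:
 * wait until no owner holds the interface, write the GET_STATE command and
 * the feature ID to the command register, wait until in-band software owns
 * the interface, set Run-Busy with the packet length, poll for Run-Busy to
 * clear, check the result code, read back struct tpmi_feature_state from
 * the data register and finally write the completion (CPL) bit.
 */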
static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id,
				    struct tpmi_feature_state *feature_state)
{
	u64 control, data;
	int ret;

	if (!tpmi_info->tpmi_control_mem)
		return -EFAULT;

	mutex_lock(&tpmi_dev_lock);

	/* Wait for owner bit set to 0 (none) */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE);
	if (ret)
		goto err_unlock;

	/* set command id to 0x10 for TPMI_GET_STATE */
	data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD);

	/* 32 bits for DATA offset and +8 for feature_id field */
	data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id);

	/* Write at command offset for qword access */
	writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);

	/* Wait for owner bit set to in-band */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND);
	if (ret)
		goto err_unlock;

	/* Set Run Busy and packet length of 2 dwords */
	control = TPMI_CONTROL_STATUS_RB;
	control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN);

	/* Write at status offset for qword access */
	writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

	/* Wait for Run Busy clear */
	ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				 control, !(control & TPMI_CONTROL_STATUS_RB),
				 TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US);
	if (ret)
		goto done_proc;

	control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control);
	if (control != TPMI_CMD_STATUS_SUCCESS) {
		ret = -EBUSY;
		goto done_proc;
	}

	/* Response is ready */
	memcpy_fromio(feature_state, tpmi_info->tpmi_control_mem + TMPI_CONTROL_DATA_VAL_OFFSET,
		      sizeof(*feature_state));

	ret = 0;

done_proc:
	/* Set CPL "completion" bit */
	writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

err_unlock:
	mutex_unlock(&tpmi_dev_lock);

	return ret;
}

int tpmi_get_feature_status(struct auxiliary_device *auxdev,
			    int feature_id, bool *read_blocked, bool *write_blocked)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
	struct tpmi_feature_state feature_state;
	int ret;

	ret = tpmi_read_feature_status(tpmi_info, feature_id, &feature_state);
	if (ret)
		return ret;

	*read_blocked = feature_state.read_blocked;
	*write_blocked = feature_state.write_blocked;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);
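
/*
 * Illustrative sketch (not part of this driver): a feature driver can use
 * tpmi_get_feature_status() to honor the hardware read/write blocks before
 * touching its MMIO registers. TPMI_MY_FEATURE_ID is a placeholder.
 *
 *	bool read_blocked, write_blocked;
 *	int ret;
 *
 *	ret = tpmi_get_feature_status(auxdev, TPMI_MY_FEATURE_ID,
 *				      &read_blocked, &write_blocked);
 *	if (ret || read_blocked)
 *		return -ENODEV;
 *	if (write_blocked)
 *		dev_info(&auxdev->dev, "Feature is read-only\n");
 */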

static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
	struct intel_tpmi_info *tpmi_info = s->private;
	int locked, disabled, read_blocked, write_blocked;
	struct tpmi_feature_state feature_state;
	struct intel_tpmi_pm_feature *pfs;
	int ret, i;

	seq_printf(s, "tpmi PFS start offset 0x%llx\n", tpmi_info->pfs_start);
	seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\tread_blocked\twrite_blocked\n");
	for (i = 0; i < tpmi_info->feature_count; ++i) {
		pfs = &tpmi_info->tpmi_features[i];
		ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
		if (ret) {
			locked = 'U';
			disabled = 'U';
			read_blocked = 'U';
			write_blocked = 'U';
		} else {
			disabled = feature_state.enabled ? 'N' : 'Y';
			locked = feature_state.locked ? 'Y' : 'N';
			read_blocked = feature_state.read_blocked ? 'Y' : 'N';
			write_blocked = feature_state.write_blocked ? 'Y' : 'N';
		}
		seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
			   pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
			   pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
			   pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
			   read_blocked, write_blocked);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg);

#define MEM_DUMP_COLUMN_COUNT	8

static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
{
	size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32);
	struct intel_tpmi_pm_feature *pfs = s->private;
	int count, ret = 0;
	void __iomem *mem;
	u32 size;
	u64 off;
	u8 *buffer;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	off = pfs->vsec_offset;

	mutex_lock(&tpmi_dev_lock);

	for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
		seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);

		mem = ioremap(off, size);
		if (!mem) {
			ret = -ENOMEM;
			break;
		}

		memcpy_fromio(buffer, mem, size);

		seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size,
			     false);

		iounmap(mem);

		off += size;
	}

	mutex_unlock(&tpmi_dev_lock);

	kfree(buffer);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump);

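/*
 * Writes via the per-feature "mem_write" debugfs file take three integers:
 * "<instance> <offset> <value>", parsed by parse_int_array_user() (array[0]
 * holds the element count). <instance> selects one of the num_entries
 * feature instances, <offset> is the byte offset within that instance and
 * <value> is the 32-bit value written with writel().
 */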
static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct intel_tpmi_pm_feature *pfs = m->private;
	u32 addr, value, punit, size;
	u32 num_elems, *array;
	void __iomem *mem;
	int ret;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	ret = parse_int_array_user(userbuf, len, (int **)&array);
	if (ret < 0)
		return ret;

	num_elems = *array;
	if (num_elems != 3) {
		ret = -EINVAL;
		goto exit_write;
	}

	punit = array[1];
	addr = array[2];
	value = array[3];

	if (punit >= pfs->pfs_header.num_entries) {
		ret = -EINVAL;
		goto exit_write;
	}

	if (addr >= size) {
		ret = -EINVAL;
		goto exit_write;
	}

	mutex_lock(&tpmi_dev_lock);

	mem = ioremap(pfs->vsec_offset + punit * size, size);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_mem_write;
	}

	writel(value, mem + addr);

	iounmap(mem);

	ret = len;

unlock_mem_write:
	mutex_unlock(&tpmi_dev_lock);

exit_write:
	kfree(array);

	return ret;
}

static int mem_write_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int mem_write_open(struct inode *inode, struct file *file)
{
	return single_open(file, mem_write_show, inode->i_private);
}

static const struct file_operations mem_write_ops = {
	.open = mem_write_open,
	.read = seq_read,
	.write = mem_write,
	.llseek = seq_lseek,
	.release = single_release,
};

#define tpmi_to_dev(info)	(&info->vsec_dev->pcidev->dev)

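/*
 * Debugfs layout created below: a top level "tpmi-<pci device>" directory
 * with a "pfs_dump" file, plus one "tpmi-id-<id>" directory per feature
 * containing "mem_dump" (0444) and "mem_write" (0644) files.
 */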
static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info)
{
	char name[64];
	int i;

	snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info)));
	tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL);

	debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops);

	for (i = 0; i < tpmi_info->feature_count; ++i) {
		struct intel_tpmi_pm_feature *pfs;
		struct dentry *dir;

		pfs = &tpmi_info->tpmi_features[i];
		snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id);
		dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir);

		debugfs_create_file("mem_dump", 0444, dir, pfs, &tpmi_mem_dump_fops);
		debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops);
	}
}

static void tpmi_set_control_base(struct auxiliary_device *auxdev,
				  struct intel_tpmi_info *tpmi_info,
				  struct intel_tpmi_pm_feature *pfs)
{
	void __iomem *mem;
	u32 size;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return;

	mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size);
	if (!mem)
		return;

	/* mem is pointing to TPMI CONTROL base */
	tpmi_info->tpmi_control_mem = mem;
}

static const char *intel_tpmi_name(enum intel_tpmi_id id)
{
	switch (id) {
	case TPMI_ID_RAPL:
		return "rapl";
	case TPMI_ID_PEM:
		return "pem";
	case TPMI_ID_UNCORE:
		return "uncore";
	case TPMI_ID_SST:
		return "sst";
	default:
		return NULL;
	}
}

/* String length for tpmi-"feature_name" (up to 8 bytes) */
#define TPMI_FEATURE_NAME_LEN	14

static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
			      struct intel_tpmi_pm_feature *pfs,
			      u64 pfs_start)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	char feature_id_name[TPMI_FEATURE_NAME_LEN];
	struct intel_vsec_device *feature_vsec_dev;
	struct tpmi_feature_state feature_state;
	struct resource *res, *tmp;
	const char *name;
	int i, ret;

	ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
	if (ret)
		return ret;

	/*
	 * If the feature is not enabled, return -EOPNOTSUPP so the caller
	 * continues to look at other features in the PFS. This does not
	 * cause the driver load to fail.
	 */
	if (!feature_state.enabled)
		return -EOPNOTSUPP;

	name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
	if (!name)
		return -EOPNOTSUPP;

	res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
	if (!feature_vsec_dev) {
		kfree(res);
		return -ENOMEM;
	}

	snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);

	for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) {
		u64 entry_size_bytes = pfs->pfs_header.entry_size * sizeof(u32);

		tmp->start = pfs->vsec_offset + entry_size_bytes * i;
		tmp->end = tmp->start + entry_size_bytes - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	feature_vsec_dev->pcidev = vsec_dev->pcidev;
	feature_vsec_dev->resource = res;
	feature_vsec_dev->num_resources = pfs->pfs_header.num_entries;
	feature_vsec_dev->priv_data = &tpmi_info->plat_info;
	feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info);
	feature_vsec_dev->ida = &intel_vsec_tpmi_ida;

	/*
	 * intel_vsec_add_aux() is resource managed, no explicit
	 * delete is required on error or on module unload.
	 * feature_vsec_dev and res memory are also freed as part of
	 * device deletion.
	 */
	return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
				  feature_vsec_dev, feature_id_name);
}

static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	int ret, i;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i],
					 tpmi_info->pfs_start);
		/*
		 * Fail if a supported feature fails to create its device,
		 * otherwise continue. Even if only one device fails to
		 * create, fail the loading of the driver. Since
		 * intel_vsec_add_aux() is resource managed, no cleanup is
		 * required for the successfully created devices.
		 */
		if (ret && ret != -EOPNOTSUPP)
			return ret;
	}

	return 0;
}

#define TPMI_INFO_BUS_INFO_OFFSET	0x08

static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
			     struct intel_tpmi_pm_feature *pfs)
{
	struct tpmi_info_header header;
	void __iomem *info_mem;

	info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET,
			   pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET);
	if (!info_mem)
		return -ENOMEM;

	memcpy_fromio(&header, info_mem, sizeof(header));

	tpmi_info->plat_info.package_id = header.pkg;
	tpmi_info->plat_info.bus_number = header.bus;
	tpmi_info->plat_info.device_number = header.dev;
	tpmi_info->plat_info.function_number = header.fn;

	iounmap(info_mem);

	return 0;
}

static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
{
	void __iomem *pfs_mem;

	pfs_mem = ioremap(start, size);
	if (!pfs_mem)
		return -ENOMEM;

	memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header));

	iounmap(pfs_mem);

	return 0;
}

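/*
 * The PFS cap_offset field is in 1 KB units, so the MMIO base of a feature is
 * vsec_offset = pfs_start + cap_offset * 1024. For example, a cap_offset of
 * 0x40 places the feature 0x10000 bytes (64 KB) above the PFS start.
 */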
#define TPMI_CAP_OFFSET_UNIT	1024

static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
	struct pci_dev *pci_dev = vsec_dev->pcidev;
	struct intel_tpmi_info *tpmi_info;
	u64 pfs_start = 0;
	int ret, i;

	tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
	if (!tpmi_info)
		return -ENOMEM;

	tpmi_info->vsec_dev = vsec_dev;
	tpmi_info->feature_count = vsec_dev->num_resources;
	tpmi_info->plat_info.bus_number = pci_dev->bus->number;

	tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources,
						sizeof(*tpmi_info->tpmi_features),
						GFP_KERNEL);
	if (!tpmi_info->tpmi_features)
		return -ENOMEM;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		struct intel_tpmi_pm_feature *pfs;
		struct resource *res;
		u64 res_start;
		int size, ret;

		pfs = &tpmi_info->tpmi_features[i];
		pfs->vsec_dev = vsec_dev;

		res = &vsec_dev->resource[i];
		if (!res)
			continue;

		res_start = res->start;
		size = resource_size(res);
		if (size < 0)
			continue;

		ret = tpmi_fetch_pfs_header(pfs, res_start, size);
		if (ret)
			continue;

		if (!pfs_start)
			pfs_start = res_start;

		pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT;

		/*
		 * Process TPMI_INFO to get the PCI device to CPU package ID
		 * mapping. Device nodes for TPMI features are not created in
		 * this loop, so the mapping information will be available
		 * when the actual device nodes are created outside this
		 * loop via tpmi_create_devices().
		 */
		if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
			tpmi_process_info(tpmi_info, pfs);

		if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
			tpmi_set_control_base(auxdev, tpmi_info, pfs);
	}

	tpmi_info->pfs_start = pfs_start;

	auxiliary_set_drvdata(auxdev, tpmi_info);

	ret = tpmi_create_devices(tpmi_info);
	if (ret)
		return ret;

	/*
	 * Allow debugfs only when the security policy allows it. Everything
	 * this debugfs interface provides can also be done via /dev/mem
	 * access. If the /dev/mem interface is locked down, don't allow
	 * debugfs to present any information. Also check for CAP_SYS_RAWIO,
	 * as is required for /dev/mem access.
	 */
	if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO))
		tpmi_dbgfs_register(tpmi_info);

	return 0;
}

static int tpmi_probe(struct auxiliary_device *auxdev,
		      const struct auxiliary_device_id *id)
{
	return intel_vsec_tpmi_init(auxdev);
}

static void tpmi_remove(struct auxiliary_device *auxdev)
{
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev);

	debugfs_remove_recursive(tpmi_info->dbgfs_dir);
}

static const struct auxiliary_device_id tpmi_id_table[] = {
	{ .name = "intel_vsec.tpmi" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);

static struct auxiliary_driver tpmi_aux_driver = {
	.id_table = tpmi_id_table,
	.probe = tpmi_probe,
	.remove = tpmi_remove,
};

module_auxiliary_driver(tpmi_aux_driver);

MODULE_IMPORT_NS(INTEL_VSEC);
MODULE_DESCRIPTION("Intel TPMI enumeration module");
MODULE_LICENSE("GPL");