// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Platform Monitoring Technology Telemetry driver
 *
 * Copyright (c) 2020, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: "David E. Box" <david.e.box@linux.intel.com>
 */

#include <linux/auxiliary_bus.h>
#include <linux/intel_vsec.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/overflow.h>

#include "class.h"

#define TELEM_SIZE_OFFSET 0x0
#define TELEM_GUID_OFFSET 0x4
#define TELEM_BASE_OFFSET 0x8
#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
/* Size field (bits 27:12) is in DWORDs; shifting by 10 instead of 12 converts it to bytes */
#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)

/* Used by client hardware to identify a fixed telemetry entry */
#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000

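/* 64-bit (QWORD) samples: sample id N starts at byte offset N * 8 */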
#define NUM_BYTES_QWORD(v) ((v) << 3)
#define SAMPLE_ID_OFFSET(v) ((v) << 3)

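/* 32-bit (DWORD) samples: sample id N starts at byte offset N * 4 */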
#define NUM_BYTES_DWORD(v) ((v) << 2)
#define SAMPLE_ID_OFFSET32(v) ((v) << 2)

/* Protects access to the xarray of telemetry endpoint handles */
static DEFINE_MUTEX(ep_lock);

enum telem_type {
        TELEM_TYPE_PUNIT = 0,
        TELEM_TYPE_CRASHLOG,
        TELEM_TYPE_PUNIT_FIXED,
};

struct pmt_telem_priv {
        int num_entries;
        struct intel_pmt_entry entry[];
};

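/*
 * Returns true for entries that describe the fixed telemetry block on early
 * client hardware; such overlapping regions are skipped by the header
 * decoder below.
 */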
static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
                                      struct device *dev)
{
        u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);

        if (intel_pmt_is_early_client_hw(dev)) {
                u32 type = TELEM_TYPE(readl(entry->disc_table));

                if ((type == TELEM_TYPE_PUNIT_FIXED) ||
                    (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
                        return true;
        }

        return false;
}

static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
                                   struct device *dev)
{
        void __iomem *disc_table = entry->disc_table;
        struct intel_pmt_header *header = &entry->header;

        if (pmt_telem_region_overlaps(entry, dev))
                return 1;

        header->access_type = TELEM_ACCESS(readl(disc_table));
        header->guid = readl(disc_table + TELEM_GUID_OFFSET);
        header->base_offset = readl(disc_table + TELEM_BASE_OFFSET);

        /* Size is measured in DWORDS, but accessor returns bytes */
        header->size = TELEM_SIZE(readl(disc_table));

        /*
         * Some devices may expose non-functioning entries that are
         * reserved for future use. They have zero size. Do not fail
         * probe for these. Just ignore them.
         */
        if (header->size == 0 || header->access_type == 0xF)
                return 1;

        return 0;
}

static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev,
                                  struct intel_pmt_entry *entry)
{
        struct telem_endpoint *ep;

        /* Endpoint lifetimes are managed by kref, not devres */
        entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL);
        if (!entry->ep)
                return -ENOMEM;

        ep = entry->ep;
        ep->pcidev = ivdev->pcidev;
        ep->header.access_type = entry->header.access_type;
        ep->header.guid = entry->header.guid;
        ep->header.base_offset = entry->header.base_offset;
        ep->header.size = entry->header.size;
        ep->base = entry->base;
        ep->present = true;
        ep->cb = ivdev->priv_data;

        kref_init(&ep->kref);

        return 0;
}

static DEFINE_XARRAY_ALLOC(telem_array);
static struct intel_pmt_namespace pmt_telem_ns = {
        .name = "telem",
        .xa = &telem_array,
        .pmt_header_decode = pmt_telem_header_decode,
        .pmt_add_endpoint = pmt_telem_add_endpoint,
};

/* Called when all users unregister and the device is removed */
static void pmt_telem_ep_release(struct kref *kref)
{
        struct telem_endpoint *ep;

        ep = container_of(kref, struct telem_endpoint, kref);
        kfree(ep);
}

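/**
 * pmt_telem_get_next_endpoint() - Get the device id of the next endpoint
 * @start: device id to start searching from
 *
 * Return: device id of the first endpoint found after @start, or 0 when no
 *         further endpoints are present (0 is not a valid device id).
 */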
unsigned long pmt_telem_get_next_endpoint(unsigned long start)
{
        struct intel_pmt_entry *entry;
        unsigned long found_idx;

        mutex_lock(&ep_lock);
        xa_for_each_start(&telem_array, found_idx, entry, start) {
                /*
                 * Return first found index after start.
                 * 0 is not valid id.
                 */
                if (found_idx > start)
                        break;
        }
        mutex_unlock(&ep_lock);

        return found_idx == start ? 0 : found_idx;
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, "INTEL_PMT_TELEMETRY");

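/**
 * pmt_telem_register_endpoint() - Get a reference to the endpoint for @devid
 * @devid: device id returned by pmt_telem_get_next_endpoint()
 *
 * The reference must be released with pmt_telem_unregister_endpoint().
 *
 * Return: the endpoint on success, ERR_PTR(-ENXIO) if @devid is not present.
 */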
struct telem_endpoint *pmt_telem_register_endpoint(int devid)
{
        struct intel_pmt_entry *entry;
        unsigned long index = devid;

        mutex_lock(&ep_lock);
        entry = xa_find(&telem_array, &index, index, XA_PRESENT);
        if (!entry) {
                mutex_unlock(&ep_lock);
                return ERR_PTR(-ENXIO);
        }

        kref_get(&entry->ep->kref);
        mutex_unlock(&ep_lock);

        return entry->ep;
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, "INTEL_PMT_TELEMETRY");

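/**
 * pmt_telem_unregister_endpoint() - Release a reference taken by
 *                                   pmt_telem_register_endpoint()
 * @ep: telemetry endpoint
 */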
void pmt_telem_unregister_endpoint(struct telem_endpoint *ep)
{
        kref_put(&ep->kref, pmt_telem_ep_release);
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, "INTEL_PMT_TELEMETRY");

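/**
 * pmt_telem_get_endpoint_info() - Get the PCI device and header for @devid
 * @devid: device id of the endpoint
 * @info:  buffer receiving the endpoint information
 *
 * Return: 0 on success, -EINVAL if @info is NULL, -ENXIO if @devid is not
 *         present.
 */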
int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info)
{
        struct intel_pmt_entry *entry;
        unsigned long index = devid;
        int err = 0;

        if (!info)
                return -EINVAL;

        mutex_lock(&ep_lock);
        entry = xa_find(&telem_array, &index, index, XA_PRESENT);
        if (!entry) {
                err = -ENXIO;
                goto unlock;
        }

        info->pdev = entry->ep->pcidev;
        info->header = entry->ep->header;

unlock:
        mutex_unlock(&ep_lock);
        return err;
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, "INTEL_PMT_TELEMETRY");

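/**
 * pmt_telem_read() - Read QWORD telemetry samples from an endpoint
 * @ep:    endpoint returned by pmt_telem_register_endpoint()
 * @id:    sample id of the first QWORD to read
 * @data:  buffer for @count QWORDs
 * @count: number of QWORDs to read
 *
 * Return: 0 on success, -ENODEV/-EPIPE if the endpoint is not present,
 *         -EINVAL if the read would run past the end of the region.
 */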
int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
{
        u32 offset, size;

        if (!ep->present)
                return -ENODEV;

        offset = SAMPLE_ID_OFFSET(id);
        size = ep->header.size;

        if (offset + NUM_BYTES_QWORD(count) > size)
                return -EINVAL;

        pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base,
                            offset, NUM_BYTES_QWORD(count));

        return ep->present ? 0 : -EPIPE;
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_read, "INTEL_PMT_TELEMETRY");

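/**
 * pmt_telem_read32() - Read DWORD telemetry samples from an endpoint
 * @ep:    endpoint returned by pmt_telem_register_endpoint()
 * @id:    sample id of the first DWORD to read
 * @data:  buffer for @count DWORDs
 * @count: number of DWORDs to read
 *
 * Return: 0 on success, -ENODEV/-EPIPE if the endpoint is not present,
 *         -EINVAL if the read would run past the end of the region.
 */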
int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count)
{
        u32 offset, size;

        if (!ep->present)
                return -ENODEV;

        offset = SAMPLE_ID_OFFSET32(id);
        size = ep->header.size;

        if (offset + NUM_BYTES_DWORD(count) > size)
                return -EINVAL;

        memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count));

        return ep->present ? 0 : -EPIPE;
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, "INTEL_PMT_TELEMETRY");

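/**
 * pmt_telem_find_and_register_endpoint() - Register the @pos'th endpoint
 *                                          matching @guid on @pcidev
 * @pcidev: PCI device that owns the telemetry region
 * @guid:   GUID of the telemetry region to look for
 * @pos:    zero-based instance of the matching endpoint to register
 *
 * Return: the registered endpoint on success, ERR_PTR(-ENXIO) if no match is
 *         found, or an ERR_PTR propagated from the endpoint lookup.
 */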
struct telem_endpoint *
pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
{
        int devid = 0;
        int inst = 0;
        int err = 0;

        while ((devid = pmt_telem_get_next_endpoint(devid))) {
                struct telem_endpoint_info ep_info;

                err = pmt_telem_get_endpoint_info(devid, &ep_info);
                if (err)
                        return ERR_PTR(err);

                if (ep_info.header.guid == guid && ep_info.pdev == pcidev) {
                        if (inst == pos)
                                return pmt_telem_register_endpoint(devid);
                        ++inst;
                }
        }

        return ERR_PTR(-ENXIO);
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, "INTEL_PMT_TELEMETRY");

static void pmt_telem_remove(struct auxiliary_device *auxdev)
{
        struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
        int i;

        mutex_lock(&ep_lock);
        for (i = 0; i < priv->num_entries; i++) {
                struct intel_pmt_entry *entry = &priv->entry[i];

                kref_put(&entry->ep->kref, pmt_telem_ep_release);
                intel_pmt_dev_destroy(entry, &pmt_telem_ns);
        }
        mutex_unlock(&ep_lock);
}

static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
        struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
        struct pmt_telem_priv *priv;
        size_t size;
        int i, ret;

        size = struct_size(priv, entry, intel_vsec_dev->num_resources);
        priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        auxiliary_set_drvdata(auxdev, priv);

        for (i = 0; i < intel_vsec_dev->num_resources; i++) {
                struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];

                mutex_lock(&ep_lock);
                ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
                mutex_unlock(&ep_lock);
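                /*
                 * A negative return is a hard failure; a positive return
                 * means the entry was intentionally skipped (e.g. zero-sized
                 * or overlapping regions rejected by pmt_telem_header_decode()).
                 */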
                if (ret < 0)
                        goto abort_probe;
                if (ret)
                        continue;

                priv->num_entries++;
        }

        return 0;
abort_probe:
        pmt_telem_remove(auxdev);
        return ret;
}

static const struct auxiliary_device_id pmt_telem_id_table[] = {
        { .name = "intel_vsec.telemetry" },
        {}
};
MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);

static struct auxiliary_driver pmt_telem_aux_driver = {
        .id_table = pmt_telem_id_table,
        .remove = pmt_telem_remove,
        .probe = pmt_telem_probe,
};

static int __init pmt_telem_init(void)
{
        return auxiliary_driver_register(&pmt_telem_aux_driver);
}
module_init(pmt_telem_init);

static void __exit pmt_telem_exit(void)
{
        auxiliary_driver_unregister(&pmt_telem_aux_driver);
        xa_destroy(&telem_array);
}
module_exit(pmt_telem_exit);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Telemetry driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("INTEL_PMT");
| 351 | |