// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Precision Time Measurement
 * Copyright (c) 2016, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "../pci.h"

/*
 * If the next upstream device supports PTM, return it; otherwise return
 * NULL. PTM Messages are local, so both link partners must support it.
 */
static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
        struct pci_dev *ups = pci_upstream_bridge(dev);

        /*
         * Switch Downstream Ports are not permitted to have a PTM
         * capability; their PTM behavior is controlled by the Upstream
         * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
         * Switch Downstream Port, look up one more level.
         */
        if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
                ups = pci_upstream_bridge(ups);

        if (ups && ups->ptm_cap)
                return ups;

        return NULL;
}

/*
 * Find the PTM Capability (if present) and extract the information we need
 * to use it.
 */
void pci_ptm_init(struct pci_dev *dev)
{
        u16 ptm;
        u32 cap;
        struct pci_dev *ups;

        if (!pci_is_pcie(dev))
                return;

        ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
        if (!ptm)
                return;

        dev->ptm_cap = ptm;
        pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));

        pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
        dev->ptm_granularity = FIELD_GET(PCI_PTM_GRANULARITY_MASK, cap);

        /*
         * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
         * furthest upstream Time Source as the PTM Root. For Endpoints,
         * "the Effective Granularity is the maximum Local Clock Granularity
         * reported by the PTM Root and all intervening PTM Time Sources."
         */
        ups = pci_upstream_ptm(dev);
        if (ups) {
                if (ups->ptm_granularity == 0)
                        dev->ptm_granularity = 0;
                else if (ups->ptm_granularity > dev->ptm_granularity)
                        dev->ptm_granularity = ups->ptm_granularity;
        } else if (cap & PCI_PTM_CAP_ROOT) {
                dev->ptm_root = 1;
        } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {

                /*
                 * Per sec 7.9.15.3, this should be the Local Clock
                 * Granularity of the associated Time Source. But it
                 * doesn't say how to find that Time Source.
                 */
                dev->ptm_granularity = 0;
        }

        if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
            pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
                pci_enable_ptm(dev, NULL);
}

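/*
 * Save the PTM Control register into the extended-capability save buffer
 * allocated in pci_ptm_init() so it can be written back after a reset or
 * resume by pci_restore_ptm_state().
 */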
void pci_save_ptm_state(struct pci_dev *dev)
{
        u16 ptm = dev->ptm_cap;
        struct pci_cap_saved_state *save_state;
        u32 *cap;

        if (!ptm)
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
        if (!save_state)
                return;

        cap = (u32 *)&save_state->cap.data[0];
        pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}

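/*
 * Write the previously saved PTM Control register value back to the device.
 * Counterpart of pci_save_ptm_state().
 */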
void pci_restore_ptm_state(struct pci_dev *dev)
{
        u16 ptm = dev->ptm_cap;
        struct pci_cap_saved_state *save_state;
        u32 *cap;

        if (!ptm)
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
        if (!save_state)
                return;

        cap = (u32 *)&save_state->cap.data[0];
        pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}

/* Enable PTM in the Control register if possible */
static int __pci_enable_ptm(struct pci_dev *dev)
{
        u16 ptm = dev->ptm_cap;
        struct pci_dev *ups;
        u32 ctrl;

        if (!ptm)
                return -EINVAL;

        /*
         * A device uses local PTM Messages to request time information
         * from a PTM Root that's farther upstream. Every device along the
         * path must support PTM and have it enabled so it can handle the
         * messages. Therefore, if this device is not a PTM Root, the
         * upstream link partner must have PTM enabled before we can enable
         * PTM.
         */
        if (!dev->ptm_root) {
                ups = pci_upstream_ptm(dev);
                if (!ups || !ups->ptm_enabled)
                        return -EINVAL;
        }

        pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);

        ctrl |= PCI_PTM_CTRL_ENABLE;
        ctrl &= ~PCI_PTM_GRANULARITY_MASK;
        ctrl |= FIELD_PREP(PCI_PTM_GRANULARITY_MASK, dev->ptm_granularity);
        if (dev->ptm_root)
                ctrl |= PCI_PTM_CTRL_ROOT;

        pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
        return 0;
}

/**
 * pci_enable_ptm() - Enable Precision Time Measurement
 * @dev: PCI device
 * @granularity: pointer to return granularity
 *
 * Enable Precision Time Measurement for @dev. If successful and
 * @granularity is non-NULL, return the Effective Granularity.
 *
 * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
 * is not a PTM Root and lacks an upstream path of PTM-enabled devices.
 */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
        int rc;
        char clock_desc[8];

        rc = __pci_enable_ptm(dev);
        if (rc)
                return rc;

        dev->ptm_enabled = 1;

        if (granularity)
                *granularity = dev->ptm_granularity;

        switch (dev->ptm_granularity) {
        case 0:
                snprintf(clock_desc, sizeof(clock_desc), "unknown");
                break;
        case 255:
                snprintf(clock_desc, sizeof(clock_desc), ">254ns");
                break;
        default:
                snprintf(clock_desc, sizeof(clock_desc), "%uns",
                         dev->ptm_granularity);
                break;
        }
        pci_info(dev, "PTM enabled%s, %s granularity\n",
                 dev->ptm_root ? " (root)" : "", clock_desc);

        return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
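
/*
 * Illustrative usage (a sketch, not taken from any particular driver): an
 * Endpoint driver that wants PTM-based timestamping would typically call
 * this from its probe path, e.g.:
 *
 *        u8 granularity;
 *
 *        if (!pci_enable_ptm(pdev, &granularity))
 *                pci_dbg(pdev, "PTM effective granularity code: %u\n",
 *                        granularity);
 *
 * where "pdev" is the driver's struct pci_dev. A granularity value of 0
 * means unknown and 255 means greater than 254 ns, matching the clock_desc
 * strings logged above.
 */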

static void __pci_disable_ptm(struct pci_dev *dev)
{
        u16 ptm = dev->ptm_cap;
        u32 ctrl;

        if (!ptm)
                return;

        pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
        ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
        pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
}

/**
 * pci_disable_ptm() - Disable Precision Time Measurement
 * @dev: PCI device
 *
 * Disable Precision Time Measurement for @dev.
 */
void pci_disable_ptm(struct pci_dev *dev)
{
        if (dev->ptm_enabled) {
                __pci_disable_ptm(dev);
                dev->ptm_enabled = 0;
        }
}
EXPORT_SYMBOL(pci_disable_ptm);

/*
 * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
 * resume if necessary.
 */
void pci_suspend_ptm(struct pci_dev *dev)
{
        if (dev->ptm_enabled)
                __pci_disable_ptm(dev);
}

/* If PTM was enabled before suspend, re-enable it when resuming */
void pci_resume_ptm(struct pci_dev *dev)
{
        if (dev->ptm_enabled)
                __pci_enable_ptm(dev);
}

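/**
 * pcie_ptm_enabled() - Report whether PTM is enabled for a device
 * @dev: PCI device (may be NULL)
 *
 * Return: true if PTM has been enabled for @dev, false otherwise (including
 * when @dev is NULL).
 */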
bool pcie_ptm_enabled(struct pci_dev *dev)
{
        if (!dev)
                return false;

        return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);

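/*
 * "context_update" debugfs attribute: the write handler accepts the strings
 * "auto" or "manual" and passes the corresponding PTM context update mode to
 * the controller; the read handler reports the current mode.
 */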
static ssize_t context_update_write(struct file *file, const char __user *ubuf,
                                    size_t count, loff_t *ppos)
{
        struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
        char buf[7];
        int ret;
        u8 mode;

        if (!ptm_debugfs->ops->context_update_write)
                return -EOPNOTSUPP;

        if (count < 1 || count >= sizeof(buf))
                return -EINVAL;

        ret = copy_from_user(buf, ubuf, count);
        if (ret)
                return -EFAULT;

        buf[count] = '\0';

        if (sysfs_streq(buf, "auto"))
                mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
        else if (sysfs_streq(buf, "manual"))
                mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
        else
                return -EINVAL;

        mutex_lock(&ptm_debugfs->lock);
        ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
        mutex_unlock(&ptm_debugfs->lock);
        if (ret)
                return ret;

        return count;
}

static ssize_t context_update_read(struct file *file, char __user *ubuf,
                                   size_t count, loff_t *ppos)
{
        struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
        char buf[8]; /* Extra space for NULL termination at the end */
        ssize_t pos;
        u8 mode;

        if (!ptm_debugfs->ops->context_update_read)
                return -EOPNOTSUPP;

        mutex_lock(&ptm_debugfs->lock);
        ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
        mutex_unlock(&ptm_debugfs->lock);

        if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
                pos = scnprintf(buf, sizeof(buf), "auto\n");
        else
                pos = scnprintf(buf, sizeof(buf), "manual\n");

        return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
}

static const struct file_operations context_update_fops = {
        .open = simple_open,
        .read = context_update_read,
        .write = context_update_write,
};

static int context_valid_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        bool valid;
        int ret;

        if (!ptm_debugfs->ops->context_valid_read)
                return -EOPNOTSUPP;

        mutex_lock(&ptm_debugfs->lock);
        ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
        mutex_unlock(&ptm_debugfs->lock);
        if (ret)
                return ret;

        *val = valid;

        return 0;
}

static int context_valid_set(void *data, u64 val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        int ret;

        if (!ptm_debugfs->ops->context_valid_write)
                return -EOPNOTSUPP;

        mutex_lock(&ptm_debugfs->lock);
        ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
        mutex_unlock(&ptm_debugfs->lock);

        return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
                         context_valid_set, "%llu\n");

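/*
 * The remaining attributes are read-only views of the controller's PTM
 * context: the local and master clocks plus the four PTM dialog timestamps
 * (t1 through t4), each exposed through a simple getter.
 */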
static int local_clock_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        u64 clock;
        int ret;

        if (!ptm_debugfs->ops->local_clock_read)
                return -EOPNOTSUPP;

        ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
        if (ret)
                return ret;

        *val = clock;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");

static int master_clock_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        u64 clock;
        int ret;

        if (!ptm_debugfs->ops->master_clock_read)
                return -EOPNOTSUPP;

        ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
        if (ret)
                return ret;

        *val = clock;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");

static int t1_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        u64 clock;
        int ret;

        if (!ptm_debugfs->ops->t1_read)
                return -EOPNOTSUPP;

        ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
        if (ret)
                return ret;

        *val = clock;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");

static int t2_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        u64 clock;
        int ret;

        if (!ptm_debugfs->ops->t2_read)
                return -EOPNOTSUPP;

        ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
        if (ret)
                return ret;

        *val = clock;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");

static int t3_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        u64 clock;
        int ret;

        if (!ptm_debugfs->ops->t3_read)
                return -EOPNOTSUPP;

        ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
        if (ret)
                return ret;

        *val = clock;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");

static int t4_get(void *data, u64 *val)
{
        struct pci_ptm_debugfs *ptm_debugfs = data;
        u64 clock;
        int ret;

        if (!ptm_debugfs->ops->t4_read)
                return -EOPNOTSUPP;

        ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
        if (ret)
                return ret;

        *val = clock;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");

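/*
 * Create the debugfs file for @attr only if the ops structure provides an
 * <attr>_visible() callback and it reports the attribute as supported. The
 * macro relies on token pasting to pick up the matching <attr>_fops defined
 * above.
 */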
#define pcie_ptm_create_debugfs_file(pdata, mode, attr)                        \
        do {                                                                   \
                if (ops->attr##_visible && ops->attr##_visible(pdata))         \
                        debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
                                            ptm_debugfs, &attr##_fops);        \
        } while (0)

/*
 * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
 * @dev: PTM capable component device
 * @pdata: Private data of the PTM capable component device
 * @ops: PTM callback structure
 *
 * Create debugfs entries for exposing the PTM context of the PTM capable
 * components such as Root Complex and Endpoint controllers.
 *
 * Return: Pointer to 'struct pci_ptm_debugfs' if success, NULL otherwise.
 */
struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
                                                const struct pcie_ptm_ops *ops)
{
        struct pci_ptm_debugfs *ptm_debugfs;
        char *dirname;
        int ret;

        /* Caller must provide check_capability() callback */
        if (!ops->check_capability)
                return NULL;

        /* Check for PTM capability before creating debugfs attributes */
        ret = ops->check_capability(pdata);
        if (!ret) {
                dev_dbg(dev, "PTM capability not present\n");
                return NULL;
        }

        ptm_debugfs = devm_kzalloc(dev, sizeof(*ptm_debugfs), GFP_KERNEL);
        if (!ptm_debugfs)
                return NULL;

        dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
        if (!dirname)
                return NULL;

        ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
        ptm_debugfs->pdata = pdata;
        ptm_debugfs->ops = ops;
        mutex_init(&ptm_debugfs->lock);

        pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
        pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
        pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
        pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
        pcie_ptm_create_debugfs_file(pdata, 0444, t1);
        pcie_ptm_create_debugfs_file(pdata, 0444, t2);
        pcie_ptm_create_debugfs_file(pdata, 0444, t3);
        pcie_ptm_create_debugfs_file(pdata, 0444, t4);

        return ptm_debugfs;
}
EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
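
/*
 * Illustrative usage (a sketch, assuming a hypothetical controller driver):
 * the driver fills in a struct pcie_ptm_ops with at least .check_capability
 * plus whichever read/write and *_visible hooks it supports, then calls
 *
 *        ptm_debugfs = pcie_ptm_create_debugfs(dev, pdata, &my_ptm_ops);
 *
 * and matches it with pcie_ptm_destroy_debugfs(ptm_debugfs) on teardown.
 */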

/*
 * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
 * @ptm_debugfs: Pointer to the PTM debugfs struct
 */
void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
{
        if (!ptm_debugfs)
                return;

        mutex_destroy(&ptm_debugfs->lock);
        debugfs_remove_recursive(ptm_debugfs->debugfs);
}
EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);