1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Processor cache information made available to userspace via sysfs; |
4 | * intended to be compatible with x86 intel_cacheinfo implementation. |
5 | * |
6 | * Copyright 2008 IBM Corporation |
7 | * Author: Nathan Lynch |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) "cacheinfo: " fmt |
11 | |
12 | #include <linux/cpu.h> |
13 | #include <linux/cpumask.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/kobject.h> |
16 | #include <linux/list.h> |
17 | #include <linux/notifier.h> |
18 | #include <linux/of.h> |
19 | #include <linux/percpu.h> |
20 | #include <linux/slab.h> |
21 | #include <asm/cputhreads.h> |
22 | #include <asm/smp.h> |
23 | |
24 | #include "cacheinfo.h" |
25 | |
26 | /* per-cpu object for tracking: |
27 | * - a "cache" kobject for the top-level directory |
28 | * - a list of "index" objects representing the cpu's local cache hierarchy |
29 | */ |
30 | struct cache_dir { |
31 | struct kobject *kobj; /* bare (not embedded) kobject for cache |
32 | * directory */ |
33 | struct cache_index_dir *index; /* list of index objects */ |
34 | }; |
35 | |
36 | /* "index" object: each cpu's cache directory has an index |
37 | * subdirectory corresponding to a cache object associated with the |
38 | * cpu. This object's lifetime is managed via the embedded kobject. |
39 | */ |
40 | struct cache_index_dir { |
41 | struct kobject kobj; |
42 | struct cache_index_dir *next; /* next index in parent directory */ |
43 | struct cache *cache; |
44 | }; |
45 | |
46 | /* Template for determining which OF properties to query for a given |
47 | * cache type */ |
48 | struct cache_type_info { |
49 | const char *name; |
50 | const char *size_prop; |
51 | |
52 | /* Allow for both [di]-cache-line-size and |
53 | * [di]-cache-block-size properties. According to the PowerPC |
54 | * Processor binding, -line-size should be provided if it |
55 | * differs from the cache block size (that which is operated |
56 | * on by cache instructions), so we look for -line-size first. |
57 | * See cache_get_line_size(). */ |
58 | |
59 | const char *line_size_props[2]; |
60 | const char *nr_sets_prop; |
61 | }; |
62 | |
63 | /* These are used to index the cache_type_info array. */ |
64 | #define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */ |
65 | #define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */ |
66 | #define CACHE_TYPE_INSTRUCTION 2 |
67 | #define CACHE_TYPE_DATA 3 |
68 | |
69 | static const struct cache_type_info cache_type_info[] = { |
70 | { |
71 | /* Embedded systems that use cache-size, cache-block-size, |
72 | * etc. for the Unified (typically L2) cache. */ |
73 | .name = "Unified" , |
74 | .size_prop = "cache-size" , |
75 | .line_size_props = { "cache-line-size" , |
76 | "cache-block-size" , }, |
77 | .nr_sets_prop = "cache-sets" , |
78 | }, |
79 | { |
80 | /* PowerPC Processor binding says the [di]-cache-* |
81 | * must be equal on unified caches, so just use |
82 | * d-cache properties. */ |
83 | .name = "Unified" , |
84 | .size_prop = "d-cache-size" , |
85 | .line_size_props = { "d-cache-line-size" , |
86 | "d-cache-block-size" , }, |
87 | .nr_sets_prop = "d-cache-sets" , |
88 | }, |
89 | { |
90 | .name = "Instruction" , |
91 | .size_prop = "i-cache-size" , |
92 | .line_size_props = { "i-cache-line-size" , |
93 | "i-cache-block-size" , }, |
94 | .nr_sets_prop = "i-cache-sets" , |
95 | }, |
96 | { |
97 | .name = "Data" , |
98 | .size_prop = "d-cache-size" , |
99 | .line_size_props = { "d-cache-line-size" , |
100 | "d-cache-block-size" , }, |
101 | .nr_sets_prop = "d-cache-sets" , |
102 | }, |
103 | }; |
104 | |
105 | /* Cache object: each instance of this corresponds to a distinct cache |
106 | * in the system. There are separate objects for Harvard caches: one |
107 | * each for instruction and data, and each refers to the same OF node. |
108 | * The refcount of the OF node is elevated for the lifetime of the |
109 | * cache object. A cache object is released when its shared_cpu_map |
110 | * is cleared (see cache_cpu_clear). |
111 | * |
112 | * A cache object is on two lists: an unsorted global list |
113 | * (cache_list) of cache objects; and a singly-linked list |
114 | * representing the local cache hierarchy, which is ordered by level |
115 | * (e.g. L1d -> L1i -> L2 -> L3). |
116 | */ |
117 | struct cache { |
118 | struct device_node *ofnode; /* OF node for this cache, may be cpu */ |
119 | struct cpumask shared_cpu_map; /* online CPUs using this cache */ |
120 | int type; /* split cache disambiguation */ |
121 | int level; /* level not explicit in device tree */ |
122 | int group_id; /* id of the group of threads that share this cache */ |
123 | struct list_head list; /* global list of cache objects */ |
124 | struct cache *next_local; /* next cache of >= level */ |
125 | }; |
126 | |
127 | static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu); |
128 | |
129 | /* traversal/modification of this list occurs only at cpu hotplug time; |
130 | * access is serialized by cpu hotplug locking |
131 | */ |
132 | static LIST_HEAD(cache_list); |
133 | |
134 | static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k) |
135 | { |
136 | return container_of(k, struct cache_index_dir, kobj); |
137 | } |
138 | |
139 | static const char *cache_type_string(const struct cache *cache) |
140 | { |
141 | return cache_type_info[cache->type].name; |
142 | } |
143 | |
144 | static void cache_init(struct cache *cache, int type, int level, |
145 | struct device_node *ofnode, int group_id) |
146 | { |
147 | cache->type = type; |
148 | cache->level = level; |
	cache->ofnode = of_node_get(ofnode);
	cache->group_id = group_id;
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
153 | } |
154 | |
155 | static struct cache *new_cache(int type, int level, |
156 | struct device_node *ofnode, int group_id) |
157 | { |
158 | struct cache *cache; |
159 | |
	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
161 | if (cache) |
162 | cache_init(cache, type, level, ofnode, group_id); |
163 | |
164 | return cache; |
165 | } |
166 | |
167 | static void release_cache_debugcheck(struct cache *cache) |
168 | { |
169 | struct cache *iter; |
170 | |
171 | list_for_each_entry(iter, &cache_list, list) |
172 | WARN_ONCE(iter->next_local == cache, |
173 | "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n" , |
174 | iter->ofnode, |
175 | cache_type_string(iter), |
176 | cache->ofnode, |
177 | cache_type_string(cache)); |
178 | } |
179 | |
180 | static void release_cache(struct cache *cache) |
181 | { |
182 | if (!cache) |
183 | return; |
184 | |
185 | pr_debug("freeing L%d %s cache for %pOFP\n" , cache->level, |
186 | cache_type_string(cache), cache->ofnode); |
187 | |
188 | release_cache_debugcheck(cache); |
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
192 | } |
193 | |
194 | static void cache_cpu_set(struct cache *cache, int cpu) |
195 | { |
196 | struct cache *next = cache; |
197 | |
198 | while (next) { |
199 | WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map), |
200 | "CPU %i already accounted in %pOFP(%s)\n" , |
201 | cpu, next->ofnode, |
202 | cache_type_string(next)); |
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
204 | next = next->next_local; |
205 | } |
206 | } |
207 | |
208 | static int cache_size(const struct cache *cache, unsigned int *ret) |
209 | { |
210 | const char *propname; |
211 | const __be32 *cache_size; |
212 | |
213 | propname = cache_type_info[cache->type].size_prop; |
214 | |
	cache_size = of_get_property(cache->ofnode, propname, NULL);
216 | if (!cache_size) |
217 | return -ENODEV; |
218 | |
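	/* Single-cell value; e.g. a hypothetical <0x8000> encodes 32KiB. */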
	*ret = of_read_number(cache_size, 1);
220 | return 0; |
221 | } |
222 | |
223 | static int cache_size_kb(const struct cache *cache, unsigned int *ret) |
224 | { |
225 | unsigned int size; |
226 | |
	if (cache_size(cache, &size))
228 | return -ENODEV; |
229 | |
230 | *ret = size / 1024; |
231 | return 0; |
232 | } |
233 | |
234 | /* not cache_line_size() because that's a macro in include/linux/cache.h */ |
235 | static int cache_get_line_size(const struct cache *cache, unsigned int *ret) |
236 | { |
237 | const __be32 *line_size; |
238 | int i, lim; |
239 | |
240 | lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props); |
241 | |
242 | for (i = 0; i < lim; i++) { |
243 | const char *propname; |
244 | |
245 | propname = cache_type_info[cache->type].line_size_props[i]; |
		line_size = of_get_property(cache->ofnode, propname, NULL);
247 | if (line_size) |
248 | break; |
249 | } |
250 | |
251 | if (!line_size) |
252 | return -ENODEV; |
253 | |
	*ret = of_read_number(line_size, 1);
255 | return 0; |
256 | } |
257 | |
258 | static int cache_nr_sets(const struct cache *cache, unsigned int *ret) |
259 | { |
260 | const char *propname; |
261 | const __be32 *nr_sets; |
262 | |
263 | propname = cache_type_info[cache->type].nr_sets_prop; |
264 | |
	nr_sets = of_get_property(cache->ofnode, propname, NULL);
266 | if (!nr_sets) |
267 | return -ENODEV; |
268 | |
	*ret = of_read_number(nr_sets, 1);
270 | return 0; |
271 | } |
272 | |
273 | static int cache_associativity(const struct cache *cache, unsigned int *ret) |
274 | { |
275 | unsigned int line_size; |
276 | unsigned int nr_sets; |
277 | unsigned int size; |
278 | |
	if (cache_nr_sets(cache, &nr_sets))
280 | goto err; |
281 | |
282 | /* If the cache is fully associative, there is no need to |
283 | * check the other properties. |
284 | */ |
285 | if (nr_sets == 1) { |
286 | *ret = 0; |
287 | return 0; |
288 | } |
289 | |
	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
293 | goto err; |
294 | |
295 | if (!(nr_sets > 0 && size > 0 && line_size > 0)) |
296 | goto err; |
297 | |
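	/*
	 * Worked example with hypothetical values: a 32KiB cache with
	 * 64 sets and 128-byte lines gives (32768 / 64) / 128 = 4,
	 * i.e. four-way set-associative.
	 */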
298 | *ret = (size / nr_sets) / line_size; |
299 | return 0; |
300 | err: |
301 | return -ENODEV; |
302 | } |
303 | |
/* Helper for dealing with split caches: the instruction and data
 * objects for one node share that node, so given either half, rewind
 * to the first sibling (the data cache) on its local list. */
305 | static struct cache *cache_find_first_sibling(struct cache *cache) |
306 | { |
307 | struct cache *iter; |
308 | |
309 | if (cache->type == CACHE_TYPE_UNIFIED || |
310 | cache->type == CACHE_TYPE_UNIFIED_D) |
311 | return cache; |
312 | |
313 | list_for_each_entry(iter, &cache_list, list) |
314 | if (iter->ofnode == cache->ofnode && |
315 | iter->group_id == cache->group_id && |
316 | iter->next_local == cache) |
317 | return iter; |
318 | |
319 | return cache; |
320 | } |
321 | |
322 | /* return the first cache on a local list matching node and thread-group id */ |
323 | static struct cache *cache_lookup_by_node_group(const struct device_node *node, |
324 | int group_id) |
325 | { |
326 | struct cache *cache = NULL; |
327 | struct cache *iter; |
328 | |
329 | list_for_each_entry(iter, &cache_list, list) { |
330 | if (iter->ofnode != node || |
331 | iter->group_id != group_id) |
332 | continue; |
		cache = cache_find_first_sibling(iter);
334 | break; |
335 | } |
336 | |
337 | return cache; |
338 | } |
339 | |
340 | static bool cache_node_is_unified(const struct device_node *np) |
341 | { |
	return of_get_property(np, "cache-unified", NULL);
343 | } |
344 | |
345 | /* |
346 | * Unified caches can have two different sets of tags. Most embedded |
347 | * use cache-size, etc. for the unified cache size, but open firmware systems |
348 | * use d-cache-size, etc. Check on initialization for which type we have, and |
349 | * return the appropriate structure type. Assume it's embedded if it isn't |
350 | * open firmware. If it's yet a 3rd type, then there will be missing entries |
351 | * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need |
352 | * to be extended further. |
353 | */ |
354 | static int cache_is_unified_d(const struct device_node *np) |
355 | { |
	return of_get_property(np,
			       cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop,
			       NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
359 | } |
360 | |
361 | static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id, |
362 | int level) |
363 | { |
364 | pr_debug("creating L%d ucache for %pOFP\n" , level, node); |
365 | |
366 | return new_cache(type: cache_is_unified_d(np: node), level, ofnode: node, group_id); |
367 | } |
368 | |
369 | static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id, |
370 | int level) |
371 | { |
372 | struct cache *dcache, *icache; |
373 | |
374 | pr_debug("creating L%d dcache and icache for %pOFP\n" , level, |
375 | node); |
376 | |
	dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);
379 | |
380 | if (!dcache || !icache) |
381 | goto err; |
382 | |
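	/* Preserve the L1d -> L1i ordering assumed by the local list. */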
383 | dcache->next_local = icache; |
384 | |
385 | return dcache; |
386 | err: |
	release_cache(dcache);
	release_cache(icache);
389 | return NULL; |
390 | } |
391 | |
392 | static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level) |
393 | { |
394 | struct cache *cache; |
395 | |
	if (cache_node_is_unified(node))
397 | cache = cache_do_one_devnode_unified(node, group_id, level); |
398 | else |
399 | cache = cache_do_one_devnode_split(node, group_id, level); |
400 | |
401 | return cache; |
402 | } |
403 | |
404 | static struct cache *cache_lookup_or_instantiate(struct device_node *node, |
405 | int group_id, |
406 | int level) |
407 | { |
408 | struct cache *cache; |
409 | |
410 | cache = cache_lookup_by_node_group(node, group_id); |
411 | |
412 | WARN_ONCE(cache && cache->level != level, |
413 | "cache level mismatch on lookup (got %d, expected %d)\n" , |
414 | cache->level, level); |
415 | |
416 | if (!cache) |
417 | cache = cache_do_one_devnode(node, group_id, level); |
418 | |
419 | return cache; |
420 | } |
421 | |
422 | static void link_cache_lists(struct cache *smaller, struct cache *bigger) |
423 | { |
424 | while (smaller->next_local) { |
425 | if (smaller->next_local == bigger) |
426 | return; /* already linked */ |
427 | smaller = smaller->next_local; |
428 | } |
429 | |
430 | smaller->next_local = bigger; |
431 | |
432 | /* |
433 | * The cache->next_local list sorts by level ascending: |
434 | * L1d -> L1i -> L2 -> L3 ... |
435 | */ |
436 | WARN_ONCE((smaller->level == 1 && bigger->level > 2) || |
437 | (smaller->level > 1 && bigger->level != smaller->level + 1), |
438 | "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n" , |
439 | smaller->level, smaller->ofnode, bigger->level, bigger->ofnode); |
440 | } |
441 | |
442 | static void do_subsidiary_caches_debugcheck(struct cache *cache) |
443 | { |
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
452 | } |
453 | |
454 | /* |
455 | * If sub-groups of threads in a core containing @cpu_id share the |
456 | * L@level-cache (information obtained via "ibm,thread-groups" |
457 | * device-tree property), then we identify the group by the first |
458 | * thread-sibling in the group. We define this to be the group-id. |
459 | * |
460 | * In the absence of any thread-group information for L@level-cache, |
461 | * this function returns -1. |
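 *
 * Hypothetical example: if an 8-thread core is split into two groups
 * {0, 2, 4, 6} and {1, 3, 5, 7} each sharing an L1 cache, threads 0,
 * 2, 4 and 6 report group-id 0 while threads 1, 3, 5 and 7 report
 * group-id 1.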
462 | */ |
463 | static int get_group_id(unsigned int cpu_id, int level) |
464 | { |
465 | if (has_big_cores && level == 1) |
466 | return cpumask_first(per_cpu(thread_group_l1_cache_map, |
467 | cpu_id)); |
468 | else if (thread_group_shares_l2 && level == 2) |
469 | return cpumask_first(per_cpu(thread_group_l2_cache_map, |
470 | cpu_id)); |
471 | else if (thread_group_shares_l3 && level == 3) |
472 | return cpumask_first(per_cpu(thread_group_l3_cache_map, |
473 | cpu_id)); |
474 | return -1; |
475 | } |
476 | |
477 | static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id) |
478 | { |
479 | struct device_node *subcache_node; |
480 | int level = cache->level; |
481 | |
482 | do_subsidiary_caches_debugcheck(cache); |
483 | |
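	/*
	 * of_find_next_cache_node() follows the node's "l2-cache" (or
	 * "next-level-cache") phandle to the next cache level, so each
	 * iteration descends one level in the hierarchy.
	 */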
484 | while ((subcache_node = of_find_next_cache_node(cache->ofnode))) { |
485 | struct cache *subcache; |
486 | int group_id; |
487 | |
488 | level++; |
489 | group_id = get_group_id(cpu_id, level); |
		subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
		of_node_put(subcache_node);
492 | if (!subcache) |
493 | break; |
494 | |
		link_cache_lists(cache, subcache);
496 | cache = subcache; |
497 | } |
498 | } |
499 | |
500 | static struct cache *cache_chain_instantiate(unsigned int cpu_id) |
501 | { |
502 | struct device_node *cpu_node; |
503 | struct cache *cpu_cache = NULL; |
504 | int group_id; |
505 | |
506 | pr_debug("creating cache object(s) for CPU %i\n" , cpu_id); |
507 | |
508 | cpu_node = of_get_cpu_node(cpu: cpu_id, NULL); |
509 | WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n" , cpu_id); |
510 | if (!cpu_node) |
511 | goto out; |
512 | |
513 | group_id = get_group_id(cpu_id, level: 1); |
514 | |
515 | cpu_cache = cache_lookup_or_instantiate(node: cpu_node, group_id, level: 1); |
516 | if (!cpu_cache) |
517 | goto out; |
518 | |
519 | do_subsidiary_caches(cache: cpu_cache, cpu_id); |
520 | |
521 | cache_cpu_set(cache: cpu_cache, cpu: cpu_id); |
522 | out: |
523 | of_node_put(node: cpu_node); |
524 | |
525 | return cpu_cache; |
526 | } |
527 | |
528 | static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id) |
529 | { |
530 | struct cache_dir *cache_dir; |
531 | struct device *dev; |
532 | struct kobject *kobj = NULL; |
533 | |
	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
544 | if (!cache_dir) |
545 | goto err; |
546 | |
547 | cache_dir->kobj = kobj; |
548 | |
549 | WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL); |
550 | |
551 | per_cpu(cache_dir_pcpu, cpu_id) = cache_dir; |
552 | |
553 | return cache_dir; |
554 | err: |
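	/* kobject_put() tolerates NULL, so this is safe on the !dev path. */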
555 | kobject_put(kobj); |
556 | return NULL; |
557 | } |
558 | |
559 | static void cache_index_release(struct kobject *kobj) |
560 | { |
561 | struct cache_index_dir *index; |
562 | |
	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
569 | } |
570 | |
571 | static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf) |
572 | { |
573 | struct kobj_attribute *kobj_attr; |
574 | |
575 | kobj_attr = container_of(attr, struct kobj_attribute, attr); |
576 | |
577 | return kobj_attr->show(k, kobj_attr, buf); |
578 | } |
579 | |
580 | static struct cache *index_kobj_to_cache(struct kobject *k) |
581 | { |
582 | struct cache_index_dir *index; |
583 | |
584 | index = kobj_to_cache_index_dir(k); |
585 | |
586 | return index->cache; |
587 | } |
588 | |
589 | static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
590 | { |
591 | unsigned int size_kb; |
592 | struct cache *cache; |
593 | |
594 | cache = index_kobj_to_cache(k); |
595 | |
	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
600 | } |
601 | |
602 | static struct kobj_attribute cache_size_attr = |
603 | __ATTR(size, 0444, size_show, NULL); |
604 | |
605 | |
606 | static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
607 | { |
608 | unsigned int line_size; |
609 | struct cache *cache; |
610 | |
611 | cache = index_kobj_to_cache(k); |
612 | |
	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
617 | } |
618 | |
619 | static struct kobj_attribute cache_line_size_attr = |
620 | __ATTR(coherency_line_size, 0444, line_size_show, NULL); |
621 | |
622 | static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
623 | { |
624 | unsigned int nr_sets; |
625 | struct cache *cache; |
626 | |
627 | cache = index_kobj_to_cache(k); |
628 | |
	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
633 | } |
634 | |
635 | static struct kobj_attribute cache_nr_sets_attr = |
636 | __ATTR(number_of_sets, 0444, nr_sets_show, NULL); |
637 | |
638 | static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
639 | { |
640 | unsigned int associativity; |
641 | struct cache *cache; |
642 | |
643 | cache = index_kobj_to_cache(k); |
644 | |
	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
649 | } |
650 | |
651 | static struct kobj_attribute cache_assoc_attr = |
652 | __ATTR(ways_of_associativity, 0444, associativity_show, NULL); |
653 | |
654 | static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
655 | { |
656 | struct cache *cache; |
657 | |
658 | cache = index_kobj_to_cache(k); |
659 | |
	return sprintf(buf, "%s\n", cache_type_string(cache));
661 | } |
662 | |
663 | static struct kobj_attribute cache_type_attr = |
664 | __ATTR(type, 0444, type_show, NULL); |
665 | |
666 | static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
667 | { |
668 | struct cache_index_dir *index; |
669 | struct cache *cache; |
670 | |
671 | index = kobj_to_cache_index_dir(k); |
672 | cache = index->cache; |
673 | |
	return sprintf(buf, "%d\n", cache->level);
675 | } |
676 | |
677 | static struct kobj_attribute cache_level_attr = |
678 | __ATTR(level, 0444, level_show, NULL); |
679 | |
680 | static ssize_t |
681 | show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list) |
682 | { |
683 | struct cache_index_dir *index; |
684 | struct cache *cache; |
685 | const struct cpumask *mask; |
686 | |
687 | index = kobj_to_cache_index_dir(k); |
688 | cache = index->cache; |
689 | |
690 | mask = &cache->shared_cpu_map; |
691 | |
692 | return cpumap_print_to_pagebuf(list, buf, mask); |
693 | } |
694 | |
695 | static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
696 | { |
	return show_shared_cpumap(k, attr, buf, false);
698 | } |
699 | |
700 | static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf) |
701 | { |
	return show_shared_cpumap(k, attr, buf, true);
703 | } |
704 | |
705 | static struct kobj_attribute cache_shared_cpu_map_attr = |
706 | __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL); |
707 | |
708 | static struct kobj_attribute cache_shared_cpu_list_attr = |
709 | __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL); |
710 | |
711 | /* Attributes which should always be created -- the kobject/sysfs core |
712 | * does this automatically via kobj_type->default_groups. This is the |
713 | * minimum data required to uniquely identify a cache. |
714 | */ |
715 | static struct attribute *cache_index_default_attrs[] = { |
716 | &cache_type_attr.attr, |
717 | &cache_level_attr.attr, |
718 | &cache_shared_cpu_map_attr.attr, |
719 | &cache_shared_cpu_list_attr.attr, |
720 | NULL, |
721 | }; |
722 | ATTRIBUTE_GROUPS(cache_index_default); |
723 | |
724 | /* Attributes which should be created if the cache device node has the |
725 | * right properties -- see cacheinfo_create_index_opt_attrs |
726 | */ |
727 | static struct kobj_attribute *cache_index_opt_attrs[] = { |
728 | &cache_size_attr, |
729 | &cache_line_size_attr, |
730 | &cache_nr_sets_attr, |
731 | &cache_assoc_attr, |
732 | }; |
733 | |
734 | static const struct sysfs_ops cache_index_ops = { |
735 | .show = cache_index_show, |
736 | }; |
737 | |
738 | static struct kobj_type cache_index_type = { |
739 | .release = cache_index_release, |
740 | .sysfs_ops = &cache_index_ops, |
741 | .default_groups = cache_index_default_groups, |
742 | }; |
743 | |
744 | static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) |
745 | { |
746 | const char *cache_type; |
747 | struct cache *cache; |
748 | char *buf; |
749 | int i; |
750 | |
751 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
752 | if (!buf) |
753 | return; |
754 | |
755 | cache = dir->cache; |
756 | cache_type = cache_type_string(cache); |
757 | |
758 | /* We don't want to create an attribute that can't provide a |
759 | * meaningful value. Check the return value of each optional |
760 | * attribute's ->show method before registering the |
761 | * attribute. |
762 | */ |
763 | for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) { |
764 | struct kobj_attribute *attr; |
765 | ssize_t rc; |
766 | |
767 | attr = cache_index_opt_attrs[i]; |
768 | |
769 | rc = attr->show(&dir->kobj, attr, buf); |
770 | if (rc <= 0) { |
771 | pr_debug("not creating %s attribute for " |
772 | "%pOFP(%s) (rc = %zd)\n" , |
773 | attr->attr.name, cache->ofnode, |
774 | cache_type, rc); |
775 | continue; |
776 | } |
777 | if (sysfs_create_file(kobj: &dir->kobj, attr: &attr->attr)) |
778 | pr_debug("could not create %s attribute for %pOFP(%s)\n" , |
779 | attr->attr.name, cache->ofnode, cache_type); |
780 | } |
781 | |
	kfree(buf);
783 | } |
784 | |
785 | static void cacheinfo_create_index_dir(struct cache *cache, int index, |
786 | struct cache_dir *cache_dir) |
787 | { |
788 | struct cache_index_dir *index_dir; |
789 | int rc; |
790 | |
	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
792 | if (!index_dir) |
793 | return; |
794 | |
795 | index_dir->cache = cache; |
796 | |
	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
801 | return; |
802 | } |
803 | |
804 | index_dir->next = cache_dir->index; |
805 | cache_dir->index = index_dir; |
806 | |
	cacheinfo_create_index_opt_attrs(index_dir);
808 | } |
809 | |
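/*
 * Populate the per-cpu sysfs hierarchy, e.g. (illustrative layout):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,...}
 *	/sys/devices/system/cpu/cpu0/cache/index1/...
 *
 * with one indexN directory per cache on the CPU's local list.
 */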
810 | static void cacheinfo_sysfs_populate(unsigned int cpu_id, |
811 | struct cache *cache_list) |
812 | { |
813 | struct cache_dir *cache_dir; |
814 | struct cache *cache; |
815 | int index = 0; |
816 | |
817 | cache_dir = cacheinfo_create_cache_dir(cpu_id); |
818 | if (!cache_dir) |
819 | return; |
820 | |
821 | cache = cache_list; |
822 | while (cache) { |
823 | cacheinfo_create_index_dir(cache, index, cache_dir); |
824 | index++; |
825 | cache = cache->next_local; |
826 | } |
827 | } |
828 | |
829 | void cacheinfo_cpu_online(unsigned int cpu_id) |
830 | { |
831 | struct cache *cache; |
832 | |
833 | cache = cache_chain_instantiate(cpu_id); |
834 | if (!cache) |
835 | return; |
836 | |
	cacheinfo_sysfs_populate(cpu_id, cache);
838 | } |
839 | |
840 | /* functions needed to remove cache entry for cpu offline or suspend/resume */ |
841 | |
842 | #if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \ |
843 | defined(CONFIG_HOTPLUG_CPU) |
844 | |
845 | static struct cache *cache_lookup_by_cpu(unsigned int cpu_id) |
846 | { |
847 | struct device_node *cpu_node; |
848 | struct cache *cache; |
849 | int group_id; |
850 | |
	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	group_id = get_group_id(cpu_id, 1);
	cache = cache_lookup_by_node_group(cpu_node, group_id);
	of_node_put(cpu_node);
859 | |
860 | return cache; |
861 | } |
862 | |
863 | static void remove_index_dirs(struct cache_dir *cache_dir) |
864 | { |
865 | struct cache_index_dir *index; |
866 | |
867 | index = cache_dir->index; |
868 | |
869 | while (index) { |
870 | struct cache_index_dir *next; |
871 | |
872 | next = index->next; |
		kobject_put(&index->kobj);
874 | index = next; |
875 | } |
876 | } |
877 | |
878 | static void remove_cache_dir(struct cache_dir *cache_dir) |
879 | { |
880 | remove_index_dirs(cache_dir); |
881 | |
882 | /* Remove cache dir from sysfs */ |
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
888 | } |
889 | |
890 | static void cache_cpu_clear(struct cache *cache, int cpu) |
891 | { |
892 | while (cache) { |
893 | struct cache *next = cache->next_local; |
894 | |
895 | WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map), |
896 | "CPU %i not accounted in %pOFP(%s)\n" , |
897 | cpu, cache->ofnode, |
898 | cache_type_string(cache)); |
899 | |
		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
901 | |
902 | /* Release the cache object if all the cpus using it |
903 | * are offline */ |
		if (cpumask_empty(&cache->shared_cpu_map))
905 | release_cache(cache); |
906 | |
907 | cache = next; |
908 | } |
909 | } |
910 | |
911 | void cacheinfo_cpu_offline(unsigned int cpu_id) |
912 | { |
913 | struct cache_dir *cache_dir; |
914 | struct cache *cache; |
915 | |
916 | /* Prevent userspace from seeing inconsistent state - remove |
917 | * the sysfs hierarchy first */ |
918 | cache_dir = per_cpu(cache_dir_pcpu, cpu_id); |
919 | |
920 | /* careful, sysfs population may have failed */ |
921 | if (cache_dir) |
922 | remove_cache_dir(cache_dir); |
923 | |
924 | per_cpu(cache_dir_pcpu, cpu_id) = NULL; |
925 | |
926 | /* clear the CPU's bit in its cache chain, possibly freeing |
927 | * cache objects */ |
928 | cache = cache_lookup_by_cpu(cpu_id); |
929 | if (cache) |
		cache_cpu_clear(cache, cpu_id);
931 | } |
932 | |
933 | void cacheinfo_teardown(void) |
934 | { |
935 | unsigned int cpu; |
936 | |
937 | lockdep_assert_cpus_held(); |
938 | |
939 | for_each_online_cpu(cpu) |
		cacheinfo_cpu_offline(cpu);
941 | } |
942 | |
943 | void cacheinfo_rebuild(void) |
944 | { |
945 | unsigned int cpu; |
946 | |
947 | lockdep_assert_cpus_held(); |
948 | |
949 | for_each_online_cpu(cpu) |
		cacheinfo_cpu_online(cpu);
951 | } |
952 | |
953 | #endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */ |
954 | |