// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch cacheinfo support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <linux/topology.h>
#include <asm/bootinfo.h>
#include <asm/cpu-info.h>

12 | int init_cache_level(unsigned int cpu) |
13 | { |
14 | int cache_present = current_cpu_data.cache_leaves_present; |
15 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
16 | |
17 | this_cpu_ci->num_levels = |
18 | current_cpu_data.cache_leaves[cache_present - 1].level; |
19 | this_cpu_ci->num_leaves = cache_present; |
20 | |
21 | return 0; |
22 | } |
23 | |
24 | static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, |
25 | struct cacheinfo *sib_leaf) |
26 | { |
27 | return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE) |
28 | && !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE)); |
29 | } |
30 | |
31 | static void cache_cpumap_setup(unsigned int cpu) |
32 | { |
33 | unsigned int index; |
34 | struct cacheinfo *this_leaf, *sib_leaf; |
35 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
36 | |
37 | for (index = 0; index < this_cpu_ci->num_leaves; index++) { |
38 | unsigned int i; |
39 | |
40 | this_leaf = this_cpu_ci->info_list + index; |
41 | /* skip if shared_cpu_map is already populated */ |
42 | if (!cpumask_empty(srcp: &this_leaf->shared_cpu_map)) |
43 | continue; |
44 | |
45 | cpumask_set_cpu(cpu, dstp: &this_leaf->shared_cpu_map); |
46 | for_each_online_cpu(i) { |
47 | struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(cpu: i); |
48 | |
49 | if (i == cpu || !sib_cpu_ci->info_list || |
50 | (cpu_to_node(cpu: i) != cpu_to_node(cpu))) |
51 | continue; |
52 | |
53 | sib_leaf = sib_cpu_ci->info_list + index; |
54 | if (cache_leaves_are_shared(this_leaf, sib_leaf)) { |
55 | cpumask_set_cpu(cpu, dstp: &sib_leaf->shared_cpu_map); |
56 | cpumask_set_cpu(cpu: i, dstp: &this_leaf->shared_cpu_map); |
57 | } |
58 | } |
59 | } |
60 | } |
61 | |
62 | int populate_cache_leaves(unsigned int cpu) |
63 | { |
64 | int i, cache_present = current_cpu_data.cache_leaves_present; |
65 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
66 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; |
67 | struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves; |
68 | |
69 | for (i = 0; i < cache_present; i++) { |
70 | cd = cdesc + i; |
71 | |
72 | this_leaf->type = cd->type; |
73 | this_leaf->level = cd->level; |
74 | this_leaf->coherency_line_size = cd->linesz; |
75 | this_leaf->number_of_sets = cd->sets; |
76 | this_leaf->ways_of_associativity = cd->ways; |
77 | this_leaf->size = cd->linesz * cd->sets * cd->ways; |
78 | this_leaf->priv = &cd->flags; |
79 | this_leaf++; |
80 | } |
81 | |
82 | cache_cpumap_setup(cpu); |
83 | this_cpu_ci->cpu_map_populated = true; |
84 | |
85 | return 0; |
86 | } |
87 |