// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt) "resctrl: " fmt

#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/resctrl.h>
#include "internal.h"

/*
 * rdt_domain structures are kfree()d when their last CPU goes offline,
 * and allocated when the first CPU in a new domain comes online.
 * The rdt_resource's domain list is updated when this happens. Readers of
 * the domain list must either take cpus_read_lock(), or rely on an RCU
 * read-side critical section, to avoid observing concurrent modification.
 * All writers take this mutex:
 */
static DEFINE_MUTEX(domain_list_lock);

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
	      struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)

struct rdt_hw_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_L3,
			.name		= "L3",
			.cache_level	= 3,
			.domains	= domain_init(RDT_RESOURCE_L3),
			.parse_ctrlval	= parse_cbm,
			.format_str	= "%d=%0*x",
			.fflags		= RFTYPE_RES_CACHE,
		},
		.msr_base	= MSR_IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
	},
	[RDT_RESOURCE_L2] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_L2,
			.name		= "L2",
			.cache_level	= 2,
			.domains	= domain_init(RDT_RESOURCE_L2),
			.parse_ctrlval	= parse_cbm,
			.format_str	= "%d=%0*x",
			.fflags		= RFTYPE_RES_CACHE,
		},
		.msr_base	= MSR_IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
	},
	[RDT_RESOURCE_MBA] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_MBA,
			.name		= "MB",
			.cache_level	= 3,
			.domains	= domain_init(RDT_RESOURCE_MBA),
			.parse_ctrlval	= parse_bw,
			.format_str	= "%d=%*u",
			.fflags		= RFTYPE_RES_MB,
		},
	},
	[RDT_RESOURCE_SMBA] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_SMBA,
			.name		= "SMBA",
			.cache_level	= 3,
			.domains	= domain_init(RDT_RESOURCE_SMBA),
			.parse_ctrlval	= parse_bw,
			.format_str	= "%d=%*u",
			.fflags		= RFTYPE_RES_MB,
		},
	},
};

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &hw_res->r_resctrl;
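	/* 20 consecutive CBM bits: BIT_ULL_MASK(20) - 1 == 0xfffff. */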
	u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0;

	if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
		return;

	rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0);

	/* If all the bits stuck in the MSR, the probe succeeded */
	if (l3_cbm_0 != max_cbm)
		return;

	hw_res->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->cache.arch_has_sparse_bitmasks = false;
	r->alloc_capable = true;

	rdt_alloc_capable = true;
}

bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;

	/*
	 * The software controller support is only applicable to the MBA
	 * resource. Make sure to check for resource type.
	 */
	if (r->rid != RDT_RESOURCE_MBA)
		return false;

	return r->membw.mba_sc;
}

/*
 * rdt_get_mb_table() - get a mapping of bandwidth (b/w) percentage values
 * exposed to the user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have a power-of-two granularity, and the
 * h/w does not guarantee a predictable curve of configured delay values
 * vs. the actual b/w enforced.
 * Hence we need a pre-calibrated mapping so the user can express the
 * memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now that support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool __get_mem_config_intel(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx, max_delay;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	r->membw.arch_needs_linear = true;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - max_delay;
		r->membw.bw_gran = MAX_MBA_BW - max_delay;
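		/*
		 * Illustrative: with MAX_MBA_BW == 100 and an enumerated
		 * max_delay of 90, min_bw = bw_gran = 10, i.e. bandwidth
		 * is configurable from 10% to 100% in 10% steps.
		 */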
	} else {
		if (!rdt_get_mb_table(r))
			return false;
		r->membw.arch_needs_linear = false;
	}
	r->data_width = 3;

	if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
		r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
	else
		r->membw.throttle_mode = THREAD_THROTTLE_MAX;
	thread_throttle_mode_init();

	r->alloc_capable = true;

	return true;
}

static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	u32 eax, ebx, ecx, edx, subleaf;

	/*
	 * Query CPUID_Fn80000020_EDX_x01 for MBA and
	 * CPUID_Fn80000020_EDX_x02 for SMBA
	 */
	subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;

	cpuid_count(0x80000020, subleaf, &eax, &ebx, &ecx, &edx);
	hw_res->num_closid = edx + 1;
	r->default_ctrl = 1 << eax;
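	/*
	 * Illustrative: EAX enumerates the control size as a power of
	 * two, so eax == 11 gives a maximum control value of 2048.
	 */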

	/* AMD does not use delay */
	r->membw.delay_linear = false;
	r->membw.arch_needs_linear = false;

	/*
	 * AMD does not use the memory delay throttle model that Intel
	 * uses to control the allocation.
	 */
	r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
	r->membw.min_bw = 0;
	r->membw.bw_gran = 1;
	/* The maximum value is 2048; displaying it needs 4 decimal digits */
	r->data_width = 4;

	r->alloc_capable = true;

	return true;
}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_ecx ecx;
	union cpuid_0x10_x_edx edx;
	u32 ebx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
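	/*
	 * Illustrative: a reported cbm_len field of 19 means a 20-bit
	 * capacity bitmask, so default_ctrl is 0xfffff and data_width
	 * is (20 + 3) / 4 = 5 hex digits.
	 */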
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		r->cache.arch_has_sparse_bitmasks = ecx.split.noncont;
	r->alloc_capable = true;
}

static void rdt_get_cdp_config(int level)
{
	/*
	 * By default, CDP is disabled. CDP can be enabled by the mount
	 * option "cdp" when the resctrl filesystem is mounted.
	 */
	rdt_resources_all[level].cdp_enabled = false;
	rdt_resources_all[level].r_resctrl.cdp_capable = true;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2);
}

static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
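	/*
	 * Illustrative: with a linear scale and bw == 80, the value
	 * written is MAX_MBA_BW - 80 == 20, i.e. a 20% throttle delay.
	 */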
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{
	return resctrl_to_arch_res(r)->num_closid;
}

void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		hw_res->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's sorted domain list for the resource id. If the
 * resource id is found in a domain, return the domain. Otherwise, if
 * requested by the caller, set *pos to the list position where a domain
 * with that id should be inserted and return NULL.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 */
	for (i = 0; i < hw_res->num_closid; i++, dc++)
		*dc = r->default_ctrl;
}

static void domain_free(struct rdt_hw_domain *hw_dom)
{
	kfree(hw_dom->arch_mbm_total);
	kfree(hw_dom->arch_mbm_local);
	kfree(hw_dom->ctrl_val);
	kfree(hw_dom);
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct msr_param m;
	u32 *dc;

	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
			   GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	hw_dom->ctrl_val = dc;
	setup_default_ctrlval(r, dc);

	m.low = 0;
	m.high = hw_res->num_closid;
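	/* Program the freshly set defaults into every CLOSID's MSR. */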
	hw_res->msr_update(d, &m, r);
	return 0;
}

/**
 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
 * @num_rmid:	The size of the MBM counter array
 * @hw_dom:	The domain that owns the allocated arrays
 */
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
{
	size_t tsize;

	if (is_mbm_total_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_total);
		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_total)
			return -ENOMEM;
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_local);
		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_local) {
			kfree(hw_dom->arch_mbm_total);
			hw_dom->arch_mbm_total = NULL;
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * domain_add_cpu - Add a CPU to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the CPU's
 * resource id, add the CPU to the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order of the domain list is visible to users: it determines the order
 * of entries printed in the schemata file, and schemata input is validated
 * against the same order.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;
	int err;

	lockdep_assert_held(&domain_list_lock);

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		if (r->cache.arch_has_per_cpu_cfg)
			rdt_domain_reconfigure_cdp(r);
		return;
	}

	hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
	if (!hw_dom)
		return;

	d = &hw_dom->d_resctrl;
	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	rdt_domain_reconfigure_cdp(r);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		domain_free(hw_dom);
		return;
	}

	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
		domain_free(hw_dom);
		return;
	}
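
	/* Publish the new domain; lockless readers traverse the list under RCU. */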
	list_add_tail_rcu(&d->list, add_pos);

	err = resctrl_online_domain(r, d);
	if (err) {
		list_del_rcu(&d->list);
		synchronize_rcu();
		domain_free(hw_dom);
	}
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	lockdep_assert_held(&domain_list_lock);

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}
	hw_dom = resctrl_to_arch_dom(d);

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		resctrl_offline_domain(r, d);
		list_del_rcu(&d->list);
		synchronize_rcu();

		/*
		 * rdt_domain "d" is going to be freed below, so clear
		 * its pointer from pseudo_lock_region struct.
		 */
		if (d->plr)
			d->plr->d = NULL;
		domain_free(hw_dom);

		return;
	}
}

static void clear_closid_rmid(int cpu)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = RESCTRL_RESERVED_CLOSID;
	state->default_rmid = RESCTRL_RESERVED_RMID;
	state->cur_closid = RESCTRL_RESERVED_CLOSID;
	state->cur_rmid = RESCTRL_RESERVED_RMID;
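	/*
	 * The PQR_ASSOC MSR holds the active RMID in its low half and the
	 * active CLOSID in its high half, hence the low/high argument order.
	 */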
	wrmsr(MSR_IA32_PQR_ASSOC, RESCTRL_RESERVED_RMID,
	      RESCTRL_RESERVED_CLOSID);
}

static int resctrl_arch_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&domain_list_lock);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	mutex_unlock(&domain_list_lock);

	clear_closid_rmid(cpu);
	resctrl_online_cpu(cpu);

	return 0;
}

static int resctrl_arch_offline_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	resctrl_offline_cpu(cpu);

	mutex_lock(&domain_list_lock);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	mutex_unlock(&domain_list_lock);

	clear_closid_rmid(cpu);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;

	for_each_alloc_capable_rdt_resource(r) {
		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
	RDT_FLAG_SMBA,
	RDT_FLAG_BMEC,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char *name;
	int flag;
	bool force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
	RDT_OPT(RDT_FLAG_SMBA, "smba", X86_FEATURE_SMBA),
	RDT_OPT(RDT_FLAG_BMEC, "bmec", X86_FEATURE_BMEC),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
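
/*
 * Example (illustrative): booting with "rdt=!l3cat,cmt" on the kernel
 * command line force-disables L3 CAT and force-enables CMT, overriding
 * what CPUID enumerates.
 */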
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);

bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

static __init bool get_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];

	if (!rdt_cpu_has(X86_FEATURE_MBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return __get_mem_config_intel(&hw_res->r_resctrl);
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}

static __init bool get_slow_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_SMBA];

	if (!rdt_cpu_has(X86_FEATURE_SMBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}

static __init bool get_rdt_alloc_resources(void)
{
	struct rdt_resource *r;
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		rdt_get_cache_alloc_cfg(1, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields have the same format as 0x10.1 */
		r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
		rdt_get_cache_alloc_cfg(2, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (get_mem_config())
		ret = true;

	if (get_slow_mem_config())
		ret = true;

	return ret;
}

static __init bool get_rdt_mon_resources(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(r);
}

static __init void __check_quirks_intel(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
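		/*
		 * Early Skylake-X steppings are affected by errata in
		 * the monitoring and L3 CAT features; L3 CAT stays off
		 * on later steppings too unless re-enabled via "rdt=".
		 */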
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
		fallthrough;
	case INTEL_FAM6_BROADWELL_X:
		intel_rdt_mbm_apply_quirk();
		break;
	}
}

static __init void check_quirks(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		__check_quirks_intel();
}

static __init bool get_rdt_resources(void)
{
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static __init void rdt_init_res_defs_intel(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_per_cpu_cfg = false;
			r->cache.min_cbm_bits = 1;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
			hw_res->msr_update = mba_wrmsr_intel;
		}
	}
}

static __init void rdt_init_res_defs_amd(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_sparse_bitmasks = true;
			r->cache.arch_has_per_cpu_cfg = true;
			r->cache.min_cbm_bits = 0;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		} else if (r->rid == RDT_RESOURCE_SMBA) {
			hw_res->msr_base = MSR_IA32_SMBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		}
	}
}

static __init void rdt_init_res_defs(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		rdt_init_res_defs_intel();
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		rdt_init_res_defs_amd();
}

static enum cpuhp_state rdt_online;

/* Runs once on the BSP during boot. */
void resctrl_cpu_detect(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
		c->x86_cache_max_rmid = -1;
		c->x86_cache_occ_scale = -1;
		c->x86_cache_mbm_width_offset = -1;
		return;
	}

	/* will be overridden if occupancy monitoring exists */
	c->x86_cache_max_rmid = cpuid_ebx(0xf);

	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

		c->x86_cache_max_rmid = ecx;
		c->x86_cache_occ_scale = ebx;
		c->x86_cache_mbm_width_offset = eax & 0xff;
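
		/*
		 * EAX[7:0] is an offset added to the architectural 24-bit
		 * MBM counter width; zero on AMD means "not enumerated"
		 * and is fixed up below.
		 */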
		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
	}
}

static int __init resctrl_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	/*
	 * Initialize functions (or definitions) that differ between
	 * vendors here.
	 */
	rdt_init_res_defs();

	check_quirks();

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/resctrl/cat:online:",
				  resctrl_arch_online_cpu,
				  resctrl_arch_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}
	rdt_online = state;

	for_each_alloc_capable_rdt_resource(r)
		pr_info("%s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("%s monitoring detected\n", r->name);

	return 0;
}

late_initcall(resctrl_late_init);

static void __exit resctrl_exit(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	cpuhp_remove_state(rdt_online);

	rdtgroup_exit();

	if (r->mon_capable)
		rdt_put_mon_l3_config();
}

__exitcall(resctrl_exit);