// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	"resctrl: " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/resctrl.h>
#include "internal.h"

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
	      struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)

struct rdt_hw_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.r_resctrl = {
			.rid = RDT_RESOURCE_L3,
			.name = "L3",
			.cache_level = 3,
			.domains = domain_init(RDT_RESOURCE_L3),
			.parse_ctrlval = parse_cbm,
			.format_str = "%d=%0*x",
			.fflags = RFTYPE_RES_CACHE,
		},
		.msr_base = MSR_IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
	},
	[RDT_RESOURCE_L2] =
	{
		.r_resctrl = {
			.rid = RDT_RESOURCE_L2,
			.name = "L2",
			.cache_level = 2,
			.domains = domain_init(RDT_RESOURCE_L2),
			.parse_ctrlval = parse_cbm,
			.format_str = "%d=%0*x",
			.fflags = RFTYPE_RES_CACHE,
		},
		.msr_base = MSR_IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
	},
	[RDT_RESOURCE_MBA] =
	{
		.r_resctrl = {
			.rid = RDT_RESOURCE_MBA,
			.name = "MB",
			.cache_level = 3,
			.domains = domain_init(RDT_RESOURCE_MBA),
			.parse_ctrlval = parse_bw,
			.format_str = "%d=%*u",
			.fflags = RFTYPE_RES_MB,
		},
	},
	[RDT_RESOURCE_SMBA] =
	{
		.r_resctrl = {
			.rid = RDT_RESOURCE_SMBA,
			.name = "SMBA",
			.cache_level = 3,
			.domains = domain_init(RDT_RESOURCE_SMBA),
			.parse_ctrlval = parse_bw,
			.format_str = "%d=%*u",
			.fflags = RFTYPE_RES_MB,
		},
	},
};
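
/*
 * Illustrative note (example values, not from the SDM): the format_str
 * fields above produce the per-domain "id=value" entries of a schemata
 * line, so a two-domain system might show "L3:0=fffff;1=fffff" for a
 * cache resource and "MB:0=100;1=100" for memory bandwidth. The
 * "L3:"/"MB:" prefixes and ';' separators are added by the schemata
 * show/parse code.
 */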

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &hw_res->r_resctrl;
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
		return;

	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	hw_res->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->cache.arch_has_sparse_bitmasks = false;
	r->alloc_capable = true;

	rdt_alloc_capable = true;
}

bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;

	/*
	 * The software controller support is only applicable to the MBA
	 * resource. Make sure to check for the resource type.
	 */
	if (r->rid != RDT_RESOURCE_MBA)
		return false;

	return r->membw.mba_sc;
}

/*
 * rdt_get_mb_table() - get a mapping of bandwidth (b/w) percentage values
 * exposed to the user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool __get_mem_config_intel(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx, max_delay;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	r->membw.arch_needs_linear = true;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - max_delay;
		r->membw.bw_gran = MAX_MBA_BW - max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
		r->membw.arch_needs_linear = false;
	}
	r->data_width = 3;

	if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
		r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
	else
		r->membw.throttle_mode = THREAD_THROTTLE_MAX;
	thread_throttle_mode_init();

	r->alloc_capable = true;

	return true;
}
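
/*
 * Worked example of the linear case above (illustrative numbers, not
 * tied to any particular SKU): with MAX_MBA_BW == 100 and a CPUID
 * max_delay field of 89, max_delay becomes 90, so
 * min_bw = bw_gran = 100 - 90 = 10 and bandwidth can be requested in
 * 10% steps from 10% to 100%.
 */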

static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx, subleaf;

	/*
	 * Query CPUID_Fn80000020_EDX_x01 for MBA and
	 * CPUID_Fn80000020_EDX_x02 for SMBA
	 */
	subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;

	cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->default_ctrl = MAX_MBA_BW_AMD;

	/* AMD does not use delay */
	r->membw.delay_linear = false;
	r->membw.arch_needs_linear = false;

	/*
	 * AMD does not use the memory delay throttle model to control
	 * the allocation like Intel does.
	 */
	r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
	r->membw.min_bw = 0;
	r->membw.bw_gran = 1;
	/* Max value is 2048; data width should be 4 in decimal */
	r->data_width = 4;

	r->alloc_capable = true;

	return true;
}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_ecx ecx;
	union cpuid_0x10_x_edx edx;
	u32 ebx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		r->cache.arch_has_sparse_bitmasks = ecx.split.noncont;
	r->alloc_capable = true;
}
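
/*
 * Worked example of the arithmetic above: if the CPUID cbm_len field
 * reads 19, the capacity bitmasks are 19 + 1 = 20 bits wide, so
 * default_ctrl = BIT_MASK(20) - 1 = 0xfffff and
 * data_width = (20 + 3) / 4 = 5 hex digits in the schemata file.
 */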

static void rdt_get_cdp_config(int level)
{
	/*
	 * By default, CDP is disabled. CDP can be enabled by the mount
	 * parameter "cdp" when the resctrl filesystem is mounted.
	 */
	rdt_resources_all[level].cdp_enabled = false;
	rdt_resources_all[level].r_resctrl.cdp_capable = true;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2);
}

static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}
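
/*
 * Example of the linear mapping (assuming MAX_MBA_BW == 100): a
 * schemata request of 70% bandwidth is programmed into the QOS MSR as
 * a delay value of 100 - 70 = 30, and 100% maps to a delay of 0.
 */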

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{
	return resctrl_to_arch_res(r)->num_closid;
}

void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		hw_res->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * the caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}
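
/*
 * Typical usage (see domain_add_cpu() below): pass a non-NULL @pos so
 * that, when no domain with the requested id exists, a newly allocated
 * domain can be list_add_tail()'d at the returned position and the
 * list stays sorted by id.
 */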

static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 */
	for (i = 0; i < hw_res->num_closid; i++, dc++)
		*dc = r->default_ctrl;
}

static void domain_free(struct rdt_hw_domain *hw_dom)
{
	kfree(hw_dom->arch_mbm_total);
	kfree(hw_dom->arch_mbm_local);
	kfree(hw_dom->ctrl_val);
	kfree(hw_dom);
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct msr_param m;
	u32 *dc;

	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
			   GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	hw_dom->ctrl_val = dc;
	setup_default_ctrlval(r, dc);

	m.low = 0;
	m.high = hw_res->num_closid;
	hw_res->msr_update(d, &m, r);
	return 0;
}

/**
 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
 * @num_rmid:	The size of the MBM counter array
 * @hw_dom:	The domain that owns the allocated arrays
 */
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
{
	size_t tsize;

	if (is_mbm_total_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_total);
		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_total)
			return -ENOMEM;
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_local);
		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_local) {
			kfree(hw_dom->arch_mbm_total);
			hw_dom->arch_mbm_total = NULL;
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;
	int err;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		if (r->cache.arch_has_per_cpu_cfg)
			rdt_domain_reconfigure_cdp(r);
		return;
	}

	hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
	if (!hw_dom)
		return;

	d = &hw_dom->d_resctrl;
	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	rdt_domain_reconfigure_cdp(r);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		domain_free(hw_dom);
		return;
	}

	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
		domain_free(hw_dom);
		return;
	}

	list_add_tail(&d->list, add_pos);

	err = resctrl_online_domain(r, d);
	if (err) {
		list_del(&d->list);
		domain_free(hw_dom);
	}
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}
	hw_dom = resctrl_to_arch_dom(d);

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		resctrl_offline_domain(r, d);
		list_del(&d->list);

		/*
		 * rdt_domain "d" is going to be freed below, so clear
		 * its pointer from pseudo_lock_region struct.
		 */
		if (d->plr)
			d->plr->d = NULL;
		domain_free(hw_dom);

		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}

static void clear_closid_rmid(int cpu)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
}

static int resctrl_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static int resctrl_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;

	for_each_alloc_capable_rdt_resource(r) {
		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
	RDT_FLAG_SMBA,
	RDT_FLAG_BMEC,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char *name;
	int flag;
	bool force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
	RDT_OPT(RDT_FLAG_SMBA, "smba", X86_FEATURE_SMBA),
	RDT_OPT(RDT_FLAG_BMEC, "bmec", X86_FEATURE_BMEC),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)

static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);
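
/*
 * Example: booting with "rdt=cmt,!l3cat" force-enables the "cmt" option
 * and force-disables L3 CAT. A leading '!' marks an option as forced
 * off; rdt_cpu_has() below applies the overrides on top of the CPUID
 * feature bits.
 */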

bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

static __init bool get_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];

	if (!rdt_cpu_has(X86_FEATURE_MBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return __get_mem_config_intel(&hw_res->r_resctrl);
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}

static __init bool get_slow_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_SMBA];

	if (!rdt_cpu_has(X86_FEATURE_SMBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}

static __init bool get_rdt_alloc_resources(void)
{
	struct rdt_resource *r;
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		rdt_get_cache_alloc_cfg(1, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are in the same format as 0x10.1 */
		r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
		rdt_get_cache_alloc_cfg(2, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (get_mem_config())
		ret = true;

	if (get_slow_mem_config())
		ret = true;

	return ret;
}

static __init bool get_rdt_mon_resources(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(r);
}

static __init void __check_quirks_intel(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
		fallthrough;
	case INTEL_FAM6_BROADWELL_X:
		intel_rdt_mbm_apply_quirk();
		break;
	}
}

static __init void check_quirks(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		__check_quirks_intel();
}

static __init bool get_rdt_resources(void)
{
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static __init void rdt_init_res_defs_intel(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_per_cpu_cfg = false;
			r->cache.min_cbm_bits = 1;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
			hw_res->msr_update = mba_wrmsr_intel;
		}
	}
}

static __init void rdt_init_res_defs_amd(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_sparse_bitmasks = true;
			r->cache.arch_has_per_cpu_cfg = true;
			r->cache.min_cbm_bits = 0;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		} else if (r->rid == RDT_RESOURCE_SMBA) {
			hw_res->msr_base = MSR_IA32_SMBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		}
	}
}

static __init void rdt_init_res_defs(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		rdt_init_res_defs_intel();
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		rdt_init_res_defs_amd();
}

static enum cpuhp_state rdt_online;

/* Runs once on the BSP during boot. */
void resctrl_cpu_detect(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
		c->x86_cache_max_rmid = -1;
		c->x86_cache_occ_scale = -1;
		c->x86_cache_mbm_width_offset = -1;
		return;
	}

	/* will be overridden if occupancy monitoring exists */
	c->x86_cache_max_rmid = cpuid_ebx(0xf);

	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

		c->x86_cache_max_rmid = ecx;
		c->x86_cache_occ_scale = ebx;
		c->x86_cache_mbm_width_offset = eax & 0xff;

		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
	}
}

static int __init resctrl_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	/*
	 * Initialize functions (or definitions) that are different
	 * between vendors here.
	 */
	rdt_init_res_defs();

	check_quirks();

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/resctrl/cat:online:",
				  resctrl_online_cpu, resctrl_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}
	rdt_online = state;

	for_each_alloc_capable_rdt_resource(r)
		pr_info("%s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("%s monitoring detected\n", r->name);

	return 0;
}

late_initcall(resctrl_late_init);

static void __exit resctrl_exit(void)
{
	cpuhp_remove_state(rdt_online);
	rdtgroup_exit();
}

__exitcall(resctrl_exit);