// SPDX-License-Identifier: GPL-2.0
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
#include <linux/sched/isolation.h>

#include "base.h"

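/*
 * Per-CPU cache of the CPU device pointers registered via register_cpu(),
 * indexed by logical CPU id and looked up by get_cpu_device().
 */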
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
	/* ACPI style match is the only one that may succeed. */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
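/*
 * Move a CPU's sysfs representation from one NUMA node to another by
 * redoing the node symlinks and updating the cached node id.
 */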
static void change_cpu_under_node(struct cpu *cpu,
				  unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;

	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}

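/*
 * Bus ->online() callback, invoked by the driver core when "1" is written
 * to the per-CPU online file.  A hedged example of the usual trigger from
 * userspace:
 *
 *	# echo 1 > /sys/devices/system/cpu/cpu2/online
 */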
static int cpu_subsys_online(struct device *dev)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = dev->id;
	int from_nid, to_nid;
	int ret;

	from_nid = cpu_to_node(cpuid);
	if (from_nid == NUMA_NO_NODE)
		return -ENODEV;

	ret = cpu_up(cpuid);
	/*
	 * When hot-adding memory to a memoryless node and onlining a CPU
	 * on that node, the node number of the CPU may change internally,
	 * so re-read it and move the device if necessary.
	 */
	to_nid = cpu_to_node(cpuid);
	if (from_nid != to_nid)
		change_cpu_under_node(cpu, from_nid, to_nid);

	return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
	return cpu_down(dev->id);
}

void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->dev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	device_unregister(&cpu->dev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
}

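/*
 * Optional arch hooks: writing to the "probe"/"release" files under
 * /sys/devices/system/cpu hands the written string to arch_cpu_probe() /
 * arch_cpu_release() while holding the device hotplug lock.
 */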
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_probe(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_release(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
	.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

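/*
 * Expose the physical address and size of this CPU's crash notes buffer.
 * A hedged note: userspace kexec/kdump tooling is the expected consumer,
 * reading /sys/devices/system/cpu/cpuN/crash_notes to assemble the
 * crash-dump ELF headers.
 */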
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->dev.id;

	/*
	 * Might be reading another cpu's data based on which cpu the read
	 * thread has been scheduled on.  But cpu data (memory) is allocated
	 * once during boot up and this data does not change thereafter.
	 * Hence this operation should be safe.  No locking required.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t rc;

	rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
	return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);

static struct attribute *crash_note_cpu_attrs[] = {
	&dev_attr_crash_notes.attr,
	&dev_attr_crash_notes_size.attr,
	NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
	.attrs = crash_note_cpu_attrs,
};
#endif

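/*
 * The "common" and "hotpluggable" group lists are identical today;
 * presumably they are kept separate so that attributes that only make
 * sense for hotpluggable CPUs can later be added to the latter without
 * touching the former.
 */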
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

static const struct attribute_group *hotpluggable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const map;
};

static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

	return cpumap_print_to_pagebuf(true, buf, ca->map);
}

#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &__cpu_online_mask),
	_CPU_ATTR(possible, &__cpu_possible_mask),
	_CPU_ATTR(present, &__cpu_present_mask),
};
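
/*
 * A hedged example of what these attributes look like from userspace on
 * a 4-CPU machine with all CPUs online:
 *
 *	$ cat /sys/devices/system/cpu/online
 *	0-3
 *	$ cat /sys/devices/system/cpu/possible
 *	0-3
 */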

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%u-%d",
				      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static ssize_t print_cpus_isolated(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t isolated;

	if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(isolated, cpu_possible_mask,
		       housekeeping_cpumask(HK_FLAG_DOMAIN));
	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));

	free_cpumask_var(isolated);

	return n;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);

#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;

	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));

	return n;
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif

static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us.  Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publicly ridiculed for doing
	 * something as foolish as this.  However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices.  The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static ssize_t print_cpu_modalias(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t n;
	u32 i;

	n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
		    CPU_FEATURE_TYPEVAL);

	for (i = 0; i < MAX_CPU_FEATURES; i++)
		if (cpu_have_feature(i)) {
			if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
				WARN(1, "CPU features overflow page\n");
				break;
			}
			n += sprintf(&buf[n], ",%04X", i);
		}
	buf[n++] = '\n';
	return n;
}
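
/*
 * A hedged illustration of the resulting modalias string; the "type"
 * portion is arch-defined via CPU_FEATURE_TYPEFMT/TYPEVAL, so on x86 it
 * would come out roughly as:
 *
 *	cpu:type:x86,ven0000fam0006mod003A:feature:,0000,0001,0002,...
 */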

static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (buf) {
		print_cpu_modalias(NULL, NULL, buf);
		add_uevent_var(env, "MODALIAS=%s", buf);
		kfree(buf);
	}
	return 0;
}
#endif

/**
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu: cpu->hotpluggable field set to 1 will generate a control file in
 *	 sysfs for this CPU.
 * @num: CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	memset(&cpu->dev, 0x00, sizeof(struct device));
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
	cpu->dev.offline_disabled = !cpu->hotpluggable;
	cpu->dev.offline = !cpu_online(num);
	cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	cpu->dev.bus->uevent = cpu_uevent;
#endif
	cpu->dev.groups = common_cpu_attr_groups;
	if (cpu->hotpluggable)
		cpu->dev.groups = hotpluggable_cpu_attr_groups;
	error = device_register(&cpu->dev);
	if (error) {
		put_device(&cpu->dev);
		return error;
	}

	per_cpu(cpu_sys_devices, num) = &cpu->dev;
	register_cpu_under_node(num, cpu_to_node(num));
	dev_pm_qos_expose_latency_limit(&cpu->dev,
					PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);

	return 0;
}
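
/*
 * A hedged usage sketch: architectures that do not select
 * CONFIG_GENERIC_CPU_DEVICES typically call register_cpu() from their own
 * topology init, along these lines (names illustrative):
 *
 *	static DEFINE_PER_CPU(struct cpu, cpu_devices);
 *
 *	static int __init topology_init(void)
 *	{
 *		int i;
 *
 *		for_each_possible_cpu(i) {
 *			struct cpu *cpu = &per_cpu(cpu_devices, i);
 *
 *			cpu->hotpluggable = 1;	// arch-dependent
 *			register_cpu(cpu, i);
 *		}
 *		return 0;
 *	}
 *	subsys_initcall(topology_init);
 */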

struct device *get_cpu_device(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);
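
/*
 * Example (hedged, dev_attr_example is hypothetical): a driver that needs
 * the device for CPU 0, e.g. to hang an extra sysfs attribute off it:
 *
 *	struct device *dev = get_cpu_device(0);
 *
 *	if (dev)
 *		device_create_file(dev, &dev_attr_example);
 */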

static void device_create_release(struct device *dev)
{
	kfree(dev);
}

__printf(4, 0)
static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
		    const struct attribute_group **groups,
		    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	device_set_pm_not_required(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);
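
/*
 * Example (hedged): cacheinfo is one in-tree user, creating a per-CPU
 * "cache" child device under the CPU device, roughly:
 *
 *	struct device *d = cpu_device_create(get_cpu_device(cpu), NULL,
 *					     NULL, "cache");
 *	if (IS_ERR(d))
 *		return PTR_ERR(d);
 */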

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
	&dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
	struct device *dev = get_cpu_device(cpu);

	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

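/*
 * On architectures that select CONFIG_GENERIC_CPU_DEVICES, register a
 * plain, non-hotpluggable device for every possible CPU; other
 * architectures register their CPU devices themselves (see the sketch
 * above register_cpu()).
 */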
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}

#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES

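/*
 * These are the default, "Not affected" handlers for the sysfs
 * vulnerability files.  Architectures that are actually affected override
 * the __weak symbols (on x86, for instance, the real implementations live
 * in arch/x86/kernel/cpu/bugs.c).
 */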
ssize_t __weak cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
	&dev_attr_spectre_v1.attr,
	&dev_attr_spectre_v2.attr,
	&dev_attr_spec_store_bypass.attr,
	&dev_attr_l1tf.attr,
	NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
	.name  = "vulnerabilities",
	.attrs = cpu_root_vulnerabilities_attrs,
};

static void __init cpu_register_vulnerabilities(void)
{
	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
			       &cpu_root_vulnerabilities_group))
		pr_err("Unable to register CPU vulnerabilities\n");
}

#else
static inline void cpu_register_vulnerabilities(void) { }
#endif

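/*
 * Called early from the driver core's driver_init() to register the cpu
 * bus and its root attributes before any CPU devices appear.
 */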
void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
	cpu_register_vulnerabilities();
}