// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static const struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
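
/*
 * Illustrative only: with these two bin attributes registered, user space
 * can read the online CPUs of a node through sysfs, e.g. (hypothetical
 * values for an 8-CPU node 0):
 *
 *	$ cat /sys/devices/system/node/node0/cpumap
 *	ff
 *	$ cat /sys/devices/system/node/node0/cpulist
 *	0-7
 *
 * The mask reflects whichever CPUs of the node are online at read time.
 */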

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev: Device for this memory access class
 * @list_node: List element in the node's access list
 * @access: The access class rank
 * @coord: Heterogeneous memory performance coordinates
 */
struct node_access_nodes {
	struct device dev;
	struct list_head list_node;
	unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
	struct access_coordinate coord;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name = "initiators",
	.attrs = node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name = "targets",
	.attrs = node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

static struct node_access_nodes *node_init_node_access(struct node *node,
					enum access_coordinate_class access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}
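
/*
 * Illustrative only: node_init_node_access() registers a child device of
 * the node named "access%u", so the first access class of node 0 appears
 * in sysfs as /sys/devices/system/node/node0/access0/ with initially empty
 * "initiators" and "targets" groups that later callers populate with links
 * and performance attributes.
 */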

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(property)						\
static ssize_t property##_show(struct device *dev,			\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->coord.property);	\
}									\
static DEVICE_ATTR_RO(property)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @coord: Heterogeneous memory performance coordinates
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
			 enum access_coordinate_class access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->coord = *coord;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
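
/*
 * Illustrative only (no caller lives in this file): a firmware table
 * parser such as the ACPI HMAT code might publish performance numbers for
 * node 1 roughly like this, assuming an ACCESS_COORDINATE_CPU class in the
 * running kernel; the values are made up and real callers pass data read
 * from firmware:
 *
 *	struct access_coordinate coord = {
 *		.read_bandwidth	 = 17000,
 *		.write_bandwidth = 15000,
 *		.read_latency	 = 120,
 *		.write_latency	 = 130,
 *	};
 *
 *	node_set_perf_attrs(1, &coord, ACCESS_COORDINATE_CPU);
 */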

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev: Device representing the cache level
 * @node: List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}
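
/*
 * Illustrative only: a platform driver that discovered a memory-side cache
 * from firmware could report it along these lines, assuming the
 * NODE_CACHE_* enum values from <linux/node.h> and with hypothetical
 * numbers:
 *
 *	struct node_cache_attrs attrs = {
 *		.size		= SZ_1G,
 *		.line_size	= 64,
 *		.level		= 1,
 *		.indexing	= NODE_CACHE_DIRECT_MAP,
 *		.write_policy	= NODE_CACHE_WRITE_BACK,
 *	};
 *
 *	node_add_cache(nid, &attrs);
 *
 * The attributes then appear under node<nid>/memory_side_cache/index1/.
 */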

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

#define K(x) ((x) << (PAGE_SHIFT - 10))
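/*
 * K() converts a page count to KiB: a page is 2^PAGE_SHIFT bytes and a KiB
 * is 2^10 bytes, so shifting left by (PAGE_SHIFT - 10) multiplies by the
 * KiB-per-page factor. With 4 KiB pages (PAGE_SHIFT == 12), K(x) == x * 4.
 */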
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d SwapCached:     %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d SecPageTables:  %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
			     "Node %d Unaccepted:     %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			     nid, 0UL,
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
			     ,
			     nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
#endif
			    );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
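
/*
 * Illustrative only: reading node0/numastat produces the six per-node
 * allocation counters, e.g. (hypothetical values):
 *
 *	numa_hit 5429817
 *	numa_miss 0
 *	numa_foreign 0
 *	interleave_hit 1392
 *	local_node 5429693
 *	other_node 124
 */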

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
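
/*
 * Illustrative only: on a two-node machine whose firmware uses the
 * conventional ACPI SLIT value of 10 for local access, node0/distance
 * might read "10 21" and node1/distance "21 10" -- one space-separated
 * entry per online node.
 */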

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	&memory_failure_attr_group,
#endif
	NULL
};

static void node_device_release(struct device *dev)
{
	kfree(to_node(dev));
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error) {
		put_device(&node->dev);
	} else {
		hugetlb_register_node(node);
		compaction_register_node(node);
	}

	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);
	compaction_unregister_node(node);
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
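
/*
 * Illustrative only: for, say, cpu 3 on node 1, the two sysfs_create_link()
 * calls above produce a pair of cross links, roughly:
 *
 *	/sys/devices/system/node/node1/cpu3 -> ../../cpu/cpu3
 *	/sys/devices/system/cpu/cpu3/node1  -> ../../node/node1
 */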

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    enum access_coordinate_class access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}
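
/*
 * Illustrative only: linking memory node 1 to compute node 0 for the first
 * access class results in symmetric group links such as
 *
 *	node0/access0/targets/node1    -> .../node/node1
 *	node1/access0/initiators/node0 -> .../node/node0
 */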

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk,
						enum meminit_context context)
{
	int ret;

	memory_block_add_nid(mem_blk, nid, context);

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * The memory block may start with several absent sections;
		 * skip the pfn range of any absent section.
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We only need to check which node each page belongs to in
		 * the boot case, because nodes' pfn ranges can be
		 * interleaved.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
				       unsigned long end_pfn,
				       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int __register_one_node(int nid)
{
	int error;
	int cpu;
	struct node *node;

	node = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_LIST_HEAD(&node->access_list);
	node_devices[nid] = node;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }
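
/*
 * For reference: _NODE_ATTR(online, N_ONLINE) expands to a struct node_attr
 * initializer pairing a read-only "online" device attribute (rendered by
 * show_node_state()) with the N_ONLINE state, so reading
 * /sys/devices/system/node/online prints the online nodemask as a range
 * list, e.g. "0-1" on a two-node box.
 */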

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

void __init node_dev_init(void)
{
	int ret, i;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs) - 1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create all node devices, which will properly link the node
	 * to applicable memory block devices and already created cpu devices.
	 */
	for_each_online_node(i) {
		ret = register_one_node(i);
		if (ret)
			panic("%s() failed to add node: %d\n", __func__, ret);
	}
}