1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/mm/memory_hotplug.c |
4 | * |
5 | * Copyright (C) |
6 | */ |
7 | |
8 | #include <linux/stddef.h> |
9 | #include <linux/mm.h> |
10 | #include <linux/sched/signal.h> |
11 | #include <linux/swap.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/pagemap.h> |
14 | #include <linux/compiler.h> |
15 | #include <linux/export.h> |
16 | #include <linux/writeback.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/sysctl.h> |
19 | #include <linux/cpu.h> |
20 | #include <linux/memory.h> |
21 | #include <linux/memremap.h> |
22 | #include <linux/memory_hotplug.h> |
23 | #include <linux/vmalloc.h> |
24 | #include <linux/ioport.h> |
25 | #include <linux/delay.h> |
26 | #include <linux/migrate.h> |
27 | #include <linux/page-isolation.h> |
28 | #include <linux/pfn.h> |
29 | #include <linux/suspend.h> |
30 | #include <linux/mm_inline.h> |
31 | #include <linux/firmware-map.h> |
32 | #include <linux/stop_machine.h> |
33 | #include <linux/hugetlb.h> |
34 | #include <linux/memblock.h> |
35 | #include <linux/compaction.h> |
36 | #include <linux/rmap.h> |
37 | #include <linux/module.h> |
38 | |
39 | #include <asm/tlbflush.h> |
40 | |
41 | #include "internal.h" |
42 | #include "shuffle.h" |
43 | |
44 | enum { |
45 | MEMMAP_ON_MEMORY_DISABLE = 0, |
46 | MEMMAP_ON_MEMORY_ENABLE, |
47 | MEMMAP_ON_MEMORY_FORCE, |
48 | }; |
49 | |
50 | static int memmap_mode __read_mostly = MEMMAP_ON_MEMORY_DISABLE; |
51 | |
52 | static inline unsigned long memory_block_memmap_size(void) |
53 | { |
54 | return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page); |
55 | } |
56 | |
57 | static inline unsigned long memory_block_memmap_on_memory_pages(void) |
58 | { |
59 | unsigned long nr_pages = PFN_UP(memory_block_memmap_size()); |
60 | |
61 | /* |
62 | * In "forced" memmap_on_memory mode, we add extra pages to align the |
63 | * vmemmap size to cover full pageblocks. That way, we can add memory |
64 | * even if the vmemmap size is not properly aligned, however, we might waste |
65 | * memory. |
66 | */ |
67 | if (memmap_mode == MEMMAP_ON_MEMORY_FORCE) |
68 | return pageblock_align(nr_pages); |
69 | return nr_pages; |
70 | } |
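/*
 * Worked example (illustrative only, assuming an x86-64 configuration with
 * 4 KiB pages, a 64-byte struct page and a 128 MiB memory block size):
 * memory_block_memmap_size() = (128 MiB / 4 KiB) * 64 = 2 MiB of memmap,
 * i.e. PFN_UP(2 MiB) = 512 pages. With 2 MiB pageblocks (512 pages) that is
 * already pageblock-aligned, so "force" mode wastes nothing; on
 * configurations where the memmap does not fill whole pageblocks,
 * pageblock_align() above rounds up and the remainder is wasted.
 */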
71 | |
72 | #ifdef CONFIG_MHP_MEMMAP_ON_MEMORY |
73 | /* |
74 | * memory_hotplug.memmap_on_memory parameter |
75 | */ |
76 | static int set_memmap_mode(const char *val, const struct kernel_param *kp) |
77 | { |
78 | int ret, mode; |
79 | bool enabled; |
80 | |
	if (sysfs_streq(val, "force") || sysfs_streq(val, "FORCE")) {
82 | mode = MEMMAP_ON_MEMORY_FORCE; |
83 | } else { |
		ret = kstrtobool(val, &enabled);
85 | if (ret < 0) |
86 | return ret; |
87 | if (enabled) |
88 | mode = MEMMAP_ON_MEMORY_ENABLE; |
89 | else |
90 | mode = MEMMAP_ON_MEMORY_DISABLE; |
91 | } |
92 | *((int *)kp->arg) = mode; |
93 | if (mode == MEMMAP_ON_MEMORY_FORCE) { |
94 | unsigned long memmap_pages = memory_block_memmap_on_memory_pages(); |
95 | |
		pr_info_once("Memory hotplug will waste %ld pages in each memory block\n",
			     memmap_pages - PFN_UP(memory_block_memmap_size()));
98 | } |
99 | return 0; |
100 | } |
101 | |
102 | static int get_memmap_mode(char *buffer, const struct kernel_param *kp) |
103 | { |
104 | if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE) |
		return sprintf(buffer, "force\n");
106 | return param_get_bool(buffer, kp); |
107 | } |
108 | |
109 | static const struct kernel_param_ops memmap_mode_ops = { |
110 | .set = set_memmap_mode, |
111 | .get = get_memmap_mode, |
112 | }; |
113 | module_param_cb(memmap_on_memory, &memmap_mode_ops, &memmap_mode, 0444); |
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug\n"
		 "With value \"force\" it could result in memory wastage due "
		 "to memmap size limitations (Y/N/force)");
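/*
 * Illustrative usage (not part of the original file): the parameter is
 * read-only at runtime (0444), so it is typically set on the kernel command
 * line, e.g.:
 *
 *	memory_hotplug.memmap_on_memory=1
 *	memory_hotplug.memmap_on_memory=force
 *
 * and can be inspected via
 * /sys/module/memory_hotplug/parameters/memmap_on_memory.
 */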
117 | |
118 | static inline bool mhp_memmap_on_memory(void) |
119 | { |
120 | return memmap_mode != MEMMAP_ON_MEMORY_DISABLE; |
121 | } |
122 | #else |
123 | static inline bool mhp_memmap_on_memory(void) |
124 | { |
125 | return false; |
126 | } |
127 | #endif |
128 | |
129 | enum { |
130 | ONLINE_POLICY_CONTIG_ZONES = 0, |
131 | ONLINE_POLICY_AUTO_MOVABLE, |
132 | }; |
133 | |
134 | static const char * const online_policy_to_str[] = { |
	[ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
	[ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
137 | }; |
138 | |
139 | static int set_online_policy(const char *val, const struct kernel_param *kp) |
140 | { |
141 | int ret = sysfs_match_string(online_policy_to_str, val); |
142 | |
143 | if (ret < 0) |
144 | return ret; |
145 | *((int *)kp->arg) = ret; |
146 | return 0; |
147 | } |
148 | |
149 | static int get_online_policy(char *buffer, const struct kernel_param *kp) |
150 | { |
	return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
152 | } |
153 | |
154 | /* |
155 | * memory_hotplug.online_policy: configure online behavior when onlining without |
156 | * specifying a zone (MMOP_ONLINE) |
157 | * |
158 | * "contig-zones": keep zone contiguous |
159 | * "auto-movable": online memory to ZONE_MOVABLE if the configuration |
160 | * (auto_movable_ratio, auto_movable_numa_aware) allows for it |
161 | */ |
162 | static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES; |
163 | static const struct kernel_param_ops online_policy_ops = { |
164 | .set = set_online_policy, |
165 | .get = get_online_policy, |
166 | }; |
167 | module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644); |
168 | MODULE_PARM_DESC(online_policy, |
169 | "Set the online policy (\"contig-zones\", \"auto-movable\") " |
		"Default: \"contig-zones\"");
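/*
 * Illustrative usage (not part of the original file): the policy is runtime
 * writable (0644), e.g.:
 *
 *	echo auto-movable > /sys/module/memory_hotplug/parameters/online_policy
 *
 * or memory_hotplug.online_policy=auto-movable on the kernel command line.
 */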
171 | |
172 | /* |
173 | * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio |
174 | * |
175 | * The ratio represent an upper limit and the kernel might decide to not |
176 | * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory |
177 | * doesn't allow for more MOVABLE memory. |
178 | */ |
179 | static unsigned int auto_movable_ratio __read_mostly = 301; |
180 | module_param(auto_movable_ratio, uint, 0644); |
181 | MODULE_PARM_DESC(auto_movable_ratio, |
182 | "Set the maximum ratio of MOVABLE:KERNEL memory in the system " |
		"in percent for \"auto-movable\" online policy. Default: 301");
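/*
 * Worked example (illustrative): with the default ratio of 301, a node or
 * memory group with K pages of early KERNEL memory may hold up to
 * 3.01 * K pages of MOVABLE memory, i.e. roughly 75% of the resulting total
 * (3.01K / (K + 3.01K) ~= 75%), leaving about a quarter of memory usable for
 * unmovable kernel allocations.
 */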
184 | |
185 | /* |
186 | * memory_hotplug.auto_movable_numa_aware: consider numa node stats |
187 | */ |
188 | #ifdef CONFIG_NUMA |
189 | static bool auto_movable_numa_aware __read_mostly = true; |
190 | module_param(auto_movable_numa_aware, bool, 0644); |
191 | MODULE_PARM_DESC(auto_movable_numa_aware, |
192 | "Consider numa node stats in addition to global stats in " |
		"\"auto-movable\" online policy. Default: true");
194 | #endif /* CONFIG_NUMA */ |
195 | |
196 | /* |
197 | * online_page_callback contains pointer to current page onlining function. |
198 | * Initially it is generic_online_page(). If it is required it could be |
199 | * changed by calling set_online_page_callback() for callback registration |
200 | * and restore_online_page_callback() for generic callback restore. |
201 | */ |
202 | |
203 | static online_page_callback_t online_page_callback = generic_online_page; |
204 | static DEFINE_MUTEX(online_page_callback_lock); |
205 | |
206 | DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock); |
207 | |
208 | void get_online_mems(void) |
209 | { |
	percpu_down_read(&mem_hotplug_lock);
211 | } |
212 | |
213 | void put_online_mems(void) |
214 | { |
	percpu_up_read(&mem_hotplug_lock);
216 | } |
217 | |
218 | bool movable_node_enabled = false; |
219 | |
220 | #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE |
221 | int mhp_default_online_type = MMOP_OFFLINE; |
222 | #else |
223 | int mhp_default_online_type = MMOP_ONLINE; |
224 | #endif |
225 | |
226 | static int __init setup_memhp_default_state(char *str) |
227 | { |
228 | const int online_type = mhp_online_type_from_str(str); |
229 | |
230 | if (online_type >= 0) |
231 | mhp_default_online_type = online_type; |
232 | |
233 | return 1; |
234 | } |
__setup("memhp_default_state=", setup_memhp_default_state);
236 | |
237 | void mem_hotplug_begin(void) |
238 | { |
239 | cpus_read_lock(); |
240 | percpu_down_write(&mem_hotplug_lock); |
241 | } |
242 | |
243 | void mem_hotplug_done(void) |
244 | { |
245 | percpu_up_write(&mem_hotplug_lock); |
246 | cpus_read_unlock(); |
247 | } |
248 | |
249 | u64 max_mem_size = U64_MAX; |
250 | |
251 | /* add this memory to iomem resource */ |
252 | static struct resource *register_memory_resource(u64 start, u64 size, |
253 | const char *resource_name) |
254 | { |
255 | struct resource *res; |
256 | unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; |
257 | |
	if (strcmp(resource_name, "System RAM"))
259 | flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED; |
260 | |
	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);
263 | |
264 | /* |
265 | * Make sure value parsed from 'mem=' only restricts memory adding |
266 | * while booting, so that memory hotplug won't be impacted. Please |
267 | * refer to document of 'mem=' in kernel-parameters.txt for more |
268 | * details. |
269 | */ |
270 | if (start + size > max_mem_size && system_state < SYSTEM_RUNNING) |
		return ERR_PTR(-E2BIG);
272 | |
273 | /* |
274 | * Request ownership of the new memory range. This might be |
275 | * a child of an existing resource that was present but |
276 | * not marked as busy. |
277 | */ |
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);
280 | |
281 | if (!res) { |
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
			 start, start + size);
		return ERR_PTR(-EEXIST);
285 | } |
286 | return res; |
287 | } |
288 | |
289 | static void release_memory_resource(struct resource *res) |
290 | { |
291 | if (!res) |
292 | return; |
	release_resource(res);
	kfree(res);
295 | } |
296 | |
297 | static int check_pfn_span(unsigned long pfn, unsigned long nr_pages) |
298 | { |
299 | /* |
300 | * Disallow all operations smaller than a sub-section and only |
301 | * allow operations smaller than a section for |
302 | * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range() |
303 | * enforces a larger memory_block_size_bytes() granularity for |
304 | * memory that will be marked online, so this check should only |
305 | * fire for direct arch_{add,remove}_memory() users outside of |
306 | * add_memory_resource(). |
307 | */ |
308 | unsigned long min_align; |
309 | |
310 | if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) |
311 | min_align = PAGES_PER_SUBSECTION; |
312 | else |
313 | min_align = PAGES_PER_SECTION; |
314 | if (!IS_ALIGNED(pfn | nr_pages, min_align)) |
315 | return -EINVAL; |
316 | return 0; |
317 | } |
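/*
 * Example (illustrative, assuming 4 KiB pages): with SPARSEMEM_VMEMMAP,
 * PAGES_PER_SUBSECTION is 512 (2 MiB), so both pfn and nr_pages must be
 * multiples of 512; without vmemmap, the granularity is a full section
 * (PAGES_PER_SECTION, e.g. 32768 pages / 128 MiB on x86-64).
 */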
318 | |
319 | /* |
320 | * Return page for the valid pfn only if the page is online. All pfn |
321 | * walkers which rely on the fully initialized page->flags and others |
322 | * should use this rather than pfn_valid && pfn_to_page |
323 | */ |
324 | struct page *pfn_to_online_page(unsigned long pfn) |
325 | { |
326 | unsigned long nr = pfn_to_section_nr(pfn); |
327 | struct dev_pagemap *pgmap; |
328 | struct mem_section *ms; |
329 | |
330 | if (nr >= NR_MEM_SECTIONS) |
331 | return NULL; |
332 | |
333 | ms = __nr_to_section(nr); |
	if (!online_section(ms))
335 | return NULL; |
336 | |
337 | /* |
338 | * Save some code text when online_section() + |
339 | * pfn_section_valid() are sufficient. |
340 | */ |
341 | if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn)) |
342 | return NULL; |
343 | |
344 | if (!pfn_section_valid(ms, pfn)) |
345 | return NULL; |
346 | |
	if (!online_device_section(ms))
348 | return pfn_to_page(pfn); |
349 | |
350 | /* |
351 | * Slowpath: when ZONE_DEVICE collides with |
352 | * ZONE_{NORMAL,MOVABLE} within the same section some pfns in |
353 | * the section may be 'offline' but 'valid'. Only |
354 | * get_dev_pagemap() can determine sub-section online status. |
355 | */ |
356 | pgmap = get_dev_pagemap(pfn, NULL); |
357 | put_dev_pagemap(pgmap); |
358 | |
359 | /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */ |
360 | if (pgmap) |
361 | return NULL; |
362 | |
363 | return pfn_to_page(pfn); |
364 | } |
365 | EXPORT_SYMBOL_GPL(pfn_to_online_page); |
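/*
 * Typical pfn-walker pattern (illustrative sketch only):
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;	(hole, offline or ZONE_DEVICE pfn)
 *		... page->flags, zone and node links are initialized here ...
 *	}
 */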
366 | |
367 | int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, |
368 | struct mhp_params *params) |
369 | { |
370 | const unsigned long end_pfn = pfn + nr_pages; |
371 | unsigned long cur_nr_pages; |
372 | int err; |
373 | struct vmem_altmap *altmap = params->altmap; |
374 | |
375 | if (WARN_ON_ONCE(!pgprot_val(params->pgprot))) |
376 | return -EINVAL; |
377 | |
378 | VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); |
379 | |
380 | if (altmap) { |
381 | /* |
382 | * Validate altmap is within bounds of the total request |
383 | */ |
384 | if (altmap->base_pfn != pfn |
385 | || vmem_altmap_offset(altmap) > nr_pages) { |
			pr_warn_once("memory add fail, invalid altmap\n");
387 | return -EINVAL; |
388 | } |
389 | altmap->alloc = 0; |
390 | } |
391 | |
392 | if (check_pfn_span(pfn, nr_pages)) { |
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
394 | return -EINVAL; |
395 | } |
396 | |
397 | for (; pfn < end_pfn; pfn += cur_nr_pages) { |
398 | /* Select all remaining pages up to the next section boundary */ |
399 | cur_nr_pages = min(end_pfn - pfn, |
400 | SECTION_ALIGN_UP(pfn + 1) - pfn); |
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
					 params->pgmap);
403 | if (err) |
404 | break; |
405 | cond_resched(); |
406 | } |
407 | vmemmap_populate_print_last(); |
408 | return err; |
409 | } |
410 | |
411 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ |
412 | static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, |
413 | unsigned long start_pfn, |
414 | unsigned long end_pfn) |
415 | { |
416 | for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) { |
417 | if (unlikely(!pfn_to_online_page(start_pfn))) |
418 | continue; |
419 | |
420 | if (unlikely(pfn_to_nid(start_pfn) != nid)) |
421 | continue; |
422 | |
423 | if (zone != page_zone(pfn_to_page(start_pfn))) |
424 | continue; |
425 | |
426 | return start_pfn; |
427 | } |
428 | |
429 | return 0; |
430 | } |
431 | |
432 | /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ |
433 | static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, |
434 | unsigned long start_pfn, |
435 | unsigned long end_pfn) |
436 | { |
437 | unsigned long pfn; |
438 | |
439 | /* pfn is the end pfn of a memory section. */ |
440 | pfn = end_pfn - 1; |
441 | for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) { |
442 | if (unlikely(!pfn_to_online_page(pfn))) |
443 | continue; |
444 | |
445 | if (unlikely(pfn_to_nid(pfn) != nid)) |
446 | continue; |
447 | |
448 | if (zone != page_zone(pfn_to_page(pfn))) |
449 | continue; |
450 | |
451 | return pfn; |
452 | } |
453 | |
454 | return 0; |
455 | } |
456 | |
457 | static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, |
458 | unsigned long end_pfn) |
459 | { |
460 | unsigned long pfn; |
461 | int nid = zone_to_nid(zone); |
462 | |
463 | if (zone->zone_start_pfn == start_pfn) { |
464 | /* |
		 * If the section is the smallest section in the zone, it needs
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
467 | * In this case, we find second smallest valid mem_section |
468 | * for shrinking zone. |
469 | */ |
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
472 | if (pfn) { |
473 | zone->spanned_pages = zone_end_pfn(zone) - pfn; |
474 | zone->zone_start_pfn = pfn; |
475 | } else { |
476 | zone->zone_start_pfn = 0; |
477 | zone->spanned_pages = 0; |
478 | } |
479 | } else if (zone_end_pfn(zone) == end_pfn) { |
480 | /* |
		 * If the section is the biggest section in the zone, it needs
		 * to shrink zone->spanned_pages.
483 | * In this case, we find second biggest valid mem_section for |
484 | * shrinking zone. |
485 | */ |
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
488 | if (pfn) |
489 | zone->spanned_pages = pfn - zone->zone_start_pfn + 1; |
490 | else { |
491 | zone->zone_start_pfn = 0; |
492 | zone->spanned_pages = 0; |
493 | } |
494 | } |
495 | } |
496 | |
497 | static void update_pgdat_span(struct pglist_data *pgdat) |
498 | { |
499 | unsigned long node_start_pfn = 0, node_end_pfn = 0; |
500 | struct zone *zone; |
501 | |
502 | for (zone = pgdat->node_zones; |
503 | zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { |
504 | unsigned long end_pfn = zone_end_pfn(zone); |
505 | |
506 | /* No need to lock the zones, they can't change. */ |
507 | if (!zone->spanned_pages) |
508 | continue; |
509 | if (!node_end_pfn) { |
510 | node_start_pfn = zone->zone_start_pfn; |
511 | node_end_pfn = end_pfn; |
512 | continue; |
513 | } |
514 | |
515 | if (end_pfn > node_end_pfn) |
516 | node_end_pfn = end_pfn; |
517 | if (zone->zone_start_pfn < node_start_pfn) |
518 | node_start_pfn = zone->zone_start_pfn; |
519 | } |
520 | |
521 | pgdat->node_start_pfn = node_start_pfn; |
522 | pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; |
523 | } |
524 | |
525 | void __ref remove_pfn_range_from_zone(struct zone *zone, |
526 | unsigned long start_pfn, |
527 | unsigned long nr_pages) |
528 | { |
529 | const unsigned long end_pfn = start_pfn + nr_pages; |
530 | struct pglist_data *pgdat = zone->zone_pgdat; |
531 | unsigned long pfn, cur_nr_pages; |
532 | |
533 | /* Poison struct pages because they are now uninitialized again. */ |
534 | for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) { |
535 | cond_resched(); |
536 | |
537 | /* Select all remaining pages up to the next section boundary */ |
538 | cur_nr_pages = |
539 | min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); |
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
542 | } |
543 | |
544 | /* |
545 | * Zone shrinking code cannot properly deal with ZONE_DEVICE. So |
546 | * we will not try to shrink the zones - which is okay as |
547 | * set_zone_contiguous() cannot deal with ZONE_DEVICE either way. |
548 | */ |
549 | if (zone_is_zone_device(zone)) |
550 | return; |
551 | |
552 | clear_zone_contiguous(zone); |
553 | |
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
555 | update_pgdat_span(pgdat); |
556 | |
557 | set_zone_contiguous(zone); |
558 | } |
559 | |
560 | /** |
561 | * __remove_pages() - remove sections of pages |
562 | * @pfn: starting pageframe (must be aligned to start of a section) |
563 | * @nr_pages: number of pages to remove (must be multiple of section size) |
564 | * @altmap: alternative device page map or %NULL if default memmap is used |
565 | * |
566 | * Generic helper function to remove section mappings and sysfs entries |
567 | * for the section of the memory we are removing. Caller needs to make |
 * sure that pages are marked reserved and zones are adjusted properly by
569 | * calling offline_pages(). |
570 | */ |
571 | void __remove_pages(unsigned long pfn, unsigned long nr_pages, |
572 | struct vmem_altmap *altmap) |
573 | { |
574 | const unsigned long end_pfn = pfn + nr_pages; |
575 | unsigned long cur_nr_pages; |
576 | |
577 | if (check_pfn_span(pfn, nr_pages)) { |
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
579 | return; |
580 | } |
581 | |
582 | for (; pfn < end_pfn; pfn += cur_nr_pages) { |
583 | cond_resched(); |
584 | /* Select all remaining pages up to the next section boundary */ |
585 | cur_nr_pages = min(end_pfn - pfn, |
586 | SECTION_ALIGN_UP(pfn + 1) - pfn); |
		sparse_remove_section(pfn, cur_nr_pages, altmap);
588 | } |
589 | } |
590 | |
591 | int set_online_page_callback(online_page_callback_t callback) |
592 | { |
593 | int rc = -EINVAL; |
594 | |
595 | get_online_mems(); |
596 | mutex_lock(&online_page_callback_lock); |
597 | |
598 | if (online_page_callback == generic_online_page) { |
599 | online_page_callback = callback; |
600 | rc = 0; |
601 | } |
602 | |
	mutex_unlock(&online_page_callback_lock);
604 | put_online_mems(); |
605 | |
606 | return rc; |
607 | } |
608 | EXPORT_SYMBOL_GPL(set_online_page_callback); |
609 | |
610 | int restore_online_page_callback(online_page_callback_t callback) |
611 | { |
612 | int rc = -EINVAL; |
613 | |
614 | get_online_mems(); |
615 | mutex_lock(&online_page_callback_lock); |
616 | |
617 | if (online_page_callback == callback) { |
618 | online_page_callback = generic_online_page; |
619 | rc = 0; |
620 | } |
621 | |
	mutex_unlock(&online_page_callback_lock);
623 | put_online_mems(); |
624 | |
625 | return rc; |
626 | } |
627 | EXPORT_SYMBOL_GPL(restore_online_page_callback); |
628 | |
629 | void generic_online_page(struct page *page, unsigned int order) |
630 | { |
631 | /* |
632 | * Freeing the page with debug_pagealloc enabled will try to unmap it, |
633 | * so we should map it first. This is better than introducing a special |
634 | * case in page freeing fast path. |
635 | */ |
	debug_pagealloc_map_pages(page, 1 << order);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
639 | } |
640 | EXPORT_SYMBOL_GPL(generic_online_page); |
641 | |
642 | static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) |
643 | { |
644 | const unsigned long end_pfn = start_pfn + nr_pages; |
645 | unsigned long pfn; |
646 | |
647 | /* |
648 | * Online the pages in MAX_ORDER aligned chunks. The callback might |
649 | * decide to not expose all pages to the buddy (e.g., expose them |
650 | * later). We account all pages as being online and belonging to this |
651 | * zone ("present"). |
652 | * When using memmap_on_memory, the range might not be aligned to |
653 | * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect |
654 | * this and the first chunk to online will be pageblock_nr_pages. |
655 | */ |
656 | for (pfn = start_pfn; pfn < end_pfn;) { |
657 | int order; |
658 | |
659 | /* |
660 | * Free to online pages in the largest chunks alignment allows. |
661 | * |
662 | * __ffs() behaviour is undefined for 0. start == 0 is |
		 * MAX_ORDER-aligned, so set order to MAX_ORDER in that case.
664 | */ |
665 | if (pfn) |
666 | order = min_t(int, MAX_ORDER, __ffs(pfn)); |
667 | else |
668 | order = MAX_ORDER; |
669 | |
670 | (*online_page_callback)(pfn_to_page(pfn), order); |
671 | pfn += (1UL << order); |
672 | } |
673 | |
674 | /* mark all involved sections as online */ |
675 | online_mem_sections(start_pfn, end_pfn); |
676 | } |
677 | |
678 | /* check which state of node_states will be changed when online memory */ |
679 | static void node_states_check_changes_online(unsigned long nr_pages, |
680 | struct zone *zone, struct memory_notify *arg) |
681 | { |
682 | int nid = zone_to_nid(zone); |
683 | |
684 | arg->status_change_nid = NUMA_NO_NODE; |
685 | arg->status_change_nid_normal = NUMA_NO_NODE; |
686 | |
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
691 | } |
692 | |
693 | static void node_states_set_node(int node, struct memory_notify *arg) |
694 | { |
695 | if (arg->status_change_nid_normal >= 0) |
		node_set_state(node, N_NORMAL_MEMORY);
697 | |
698 | if (arg->status_change_nid >= 0) |
		node_set_state(node, N_MEMORY);
700 | } |
701 | |
702 | static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, |
703 | unsigned long nr_pages) |
704 | { |
705 | unsigned long old_end_pfn = zone_end_pfn(zone); |
706 | |
707 | if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) |
708 | zone->zone_start_pfn = start_pfn; |
709 | |
710 | zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; |
711 | } |
712 | |
713 | static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn, |
714 | unsigned long nr_pages) |
715 | { |
716 | unsigned long old_end_pfn = pgdat_end_pfn(pgdat); |
717 | |
718 | if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) |
719 | pgdat->node_start_pfn = start_pfn; |
720 | |
721 | pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; |
722 | |
723 | } |
724 | |
725 | #ifdef CONFIG_ZONE_DEVICE |
726 | static void section_taint_zone_device(unsigned long pfn) |
727 | { |
728 | struct mem_section *ms = __pfn_to_section(pfn); |
729 | |
730 | ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE; |
731 | } |
732 | #else |
733 | static inline void section_taint_zone_device(unsigned long pfn) |
734 | { |
735 | } |
736 | #endif |
737 | |
738 | /* |
739 | * Associate the pfn range with the given zone, initializing the memmaps |
740 | * and resizing the pgdat/zone data to span the added pages. After this |
741 | * call, all affected pages are PG_reserved. |
742 | * |
743 | * All aligned pageblocks are initialized to the specified migratetype |
744 | * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related |
745 | * zone stats (e.g., nr_isolate_pageblock) are touched. |
746 | */ |
747 | void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, |
748 | unsigned long nr_pages, |
749 | struct vmem_altmap *altmap, int migratetype) |
750 | { |
751 | struct pglist_data *pgdat = zone->zone_pgdat; |
752 | int nid = pgdat->node_id; |
753 | |
754 | clear_zone_contiguous(zone); |
755 | |
756 | if (zone_is_empty(zone)) |
		init_currently_empty_zone(zone, start_pfn, nr_pages);
758 | resize_zone_range(zone, start_pfn, nr_pages); |
759 | resize_pgdat_range(pgdat, start_pfn, nr_pages); |
760 | |
761 | /* |
762 | * Subsection population requires care in pfn_to_online_page(). |
763 | * Set the taint to enable the slow path detection of |
764 | * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE} |
765 | * section. |
766 | */ |
767 | if (zone_is_zone_device(zone)) { |
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
772 | } |
773 | |
774 | /* |
775 | * TODO now we have a visible range of pages which are not associated |
776 | * with their zone properly. Not nice but set_pfnblock_flags_mask |
777 | * expects the zone spans the pfn range. All the pages in the range |
778 | * are reserved so nobody should be touching them so we should be safe |
779 | */ |
780 | memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0, |
781 | MEMINIT_HOTPLUG, altmap, migratetype); |
782 | |
783 | set_zone_contiguous(zone); |
784 | } |
785 | |
786 | struct auto_movable_stats { |
787 | unsigned long kernel_early_pages; |
788 | unsigned long movable_pages; |
789 | }; |
790 | |
791 | static void auto_movable_stats_account_zone(struct auto_movable_stats *stats, |
792 | struct zone *zone) |
793 | { |
794 | if (zone_idx(zone) == ZONE_MOVABLE) { |
795 | stats->movable_pages += zone->present_pages; |
796 | } else { |
797 | stats->kernel_early_pages += zone->present_early_pages; |
798 | #ifdef CONFIG_CMA |
799 | /* |
800 | * CMA pages (never on hotplugged memory) behave like |
801 | * ZONE_MOVABLE. |
802 | */ |
803 | stats->movable_pages += zone->cma_pages; |
804 | stats->kernel_early_pages -= zone->cma_pages; |
805 | #endif /* CONFIG_CMA */ |
806 | } |
807 | } |
808 | struct auto_movable_group_stats { |
809 | unsigned long movable_pages; |
810 | unsigned long req_kernel_early_pages; |
811 | }; |
812 | |
813 | static int auto_movable_stats_account_group(struct memory_group *group, |
814 | void *arg) |
815 | { |
816 | const int ratio = READ_ONCE(auto_movable_ratio); |
817 | struct auto_movable_group_stats *stats = arg; |
818 | long pages; |
819 | |
820 | /* |
821 | * We don't support modifying the config while the auto-movable online |
822 | * policy is already enabled. Just avoid the division by zero below. |
823 | */ |
824 | if (!ratio) |
825 | return 0; |
826 | |
827 | /* |
828 | * Calculate how many early kernel pages this group requires to |
829 | * satisfy the configured zone ratio. |
830 | */ |
831 | pages = group->present_movable_pages * 100 / ratio; |
832 | pages -= group->present_kernel_pages; |
833 | |
834 | if (pages > 0) |
835 | stats->req_kernel_early_pages += pages; |
836 | stats->movable_pages += group->present_movable_pages; |
837 | return 0; |
838 | } |
839 | |
840 | static bool auto_movable_can_online_movable(int nid, struct memory_group *group, |
841 | unsigned long nr_pages) |
842 | { |
843 | unsigned long kernel_early_pages, movable_pages; |
844 | struct auto_movable_group_stats group_stats = {}; |
845 | struct auto_movable_stats stats = {}; |
846 | pg_data_t *pgdat = NODE_DATA(nid); |
847 | struct zone *zone; |
848 | int i; |
849 | |
850 | /* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */ |
851 | if (nid == NUMA_NO_NODE) { |
852 | /* TODO: cache values */ |
853 | for_each_populated_zone(zone) |
			auto_movable_stats_account_zone(&stats, zone);
855 | } else { |
856 | for (i = 0; i < MAX_NR_ZONES; i++) { |
857 | zone = pgdat->node_zones + i; |
858 | if (populated_zone(zone)) |
				auto_movable_stats_account_zone(&stats, zone);
860 | } |
861 | } |
862 | |
863 | kernel_early_pages = stats.kernel_early_pages; |
864 | movable_pages = stats.movable_pages; |
865 | |
866 | /* |
867 | * Kernel memory inside dynamic memory group allows for more MOVABLE |
868 | * memory within the same group. Remove the effect of all but the |
869 | * current group from the stats. |
870 | */ |
	walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
				   group, &group_stats);
873 | if (kernel_early_pages <= group_stats.req_kernel_early_pages) |
874 | return false; |
875 | kernel_early_pages -= group_stats.req_kernel_early_pages; |
876 | movable_pages -= group_stats.movable_pages; |
877 | |
878 | if (group && group->is_dynamic) |
879 | kernel_early_pages += group->present_kernel_pages; |
880 | |
881 | /* |
882 | * Test if we could online the given number of pages to ZONE_MOVABLE |
883 | * and still stay in the configured ratio. |
884 | */ |
885 | movable_pages += nr_pages; |
886 | return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100; |
887 | } |
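/*
 * Worked example (illustrative): with auto_movable_ratio = 301,
 * kernel_early_pages = 1000000 and movable_pages = 2500000, onlining another
 * nr_pages = 400000 to ZONE_MOVABLE is allowed because
 * 2900000 <= (301 * 1000000) / 100 = 3010000; a request for 600000 pages
 * would fail this check and end up in a kernel zone instead.
 */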
888 | |
889 | /* |
890 | * Returns a default kernel memory zone for the given pfn range. |
891 | * If no kernel zone covers this pfn range it will automatically go |
892 | * to the ZONE_NORMAL. |
893 | */ |
894 | static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, |
895 | unsigned long nr_pages) |
896 | { |
897 | struct pglist_data *pgdat = NODE_DATA(nid); |
898 | int zid; |
899 | |
900 | for (zid = 0; zid < ZONE_NORMAL; zid++) { |
901 | struct zone *zone = &pgdat->node_zones[zid]; |
902 | |
903 | if (zone_intersects(zone, start_pfn, nr_pages)) |
904 | return zone; |
905 | } |
906 | |
907 | return &pgdat->node_zones[ZONE_NORMAL]; |
908 | } |
909 | |
910 | /* |
911 | * Determine to which zone to online memory dynamically based on user |
912 | * configuration and system stats. We care about the following ratio: |
913 | * |
914 | * MOVABLE : KERNEL |
915 | * |
916 | * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in |
917 | * one of the kernel zones. CMA pages inside one of the kernel zones really |
918 | * behaves like ZONE_MOVABLE, so we treat them accordingly. |
919 | * |
920 | * We don't allow for hotplugged memory in a KERNEL zone to increase the |
921 | * amount of MOVABLE memory we can have, so we end up with: |
922 | * |
923 | * MOVABLE : KERNEL_EARLY |
924 | * |
 * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
926 | * boot. We base our calculation on KERNEL_EARLY internally, because: |
927 | * |
928 | * a) Hotplugged memory in one of the kernel zones can sometimes still get |
929 | * hotunplugged, especially when hot(un)plugging individual memory blocks. |
930 | * There is no coordination across memory devices, therefore "automatic" |
931 | * hotunplugging, as implemented in hypervisors, could result in zone |
932 | * imbalances. |
933 | * b) Early/boot memory in one of the kernel zones can usually not get |
934 | * hotunplugged again (e.g., no firmware interface to unplug, fragmented |
935 | * with unmovable allocations). While there are corner cases where it might |
936 | * still work, it is barely relevant in practice. |
937 | * |
938 | * Exceptions are dynamic memory groups, which allow for more MOVABLE |
939 | * memory within the same memory group -- because in that case, there is |
940 | * coordination within the single memory device managed by a single driver. |
941 | * |
942 | * We rely on "present pages" instead of "managed pages", as the latter is |
943 | * highly unreliable and dynamic in virtualized environments, and does not |
944 | * consider boot time allocations. For example, memory ballooning adjusts the |
945 | * managed pages when inflating/deflating the balloon, and balloon compaction |
946 | * can even migrate inflated pages between zones. |
947 | * |
948 | * Using "present pages" is better but some things to keep in mind are: |
949 | * |
950 | * a) Some memblock allocations, such as for the crashkernel area, are |
951 | * effectively unused by the kernel, yet they account to "present pages". |
952 | * Fortunately, these allocations are comparatively small in relevant setups |
953 | * (e.g., fraction of system memory). |
 * b) Some hotplugged memory blocks in virtualized environments, especially
955 | * hotplugged by virtio-mem, look like they are completely present, however, |
956 | * only parts of the memory block are actually currently usable. |
957 | * "present pages" is an upper limit that can get reached at runtime. As |
958 | * we base our calculations on KERNEL_EARLY, this is not an issue. |
959 | */ |
960 | static struct zone *auto_movable_zone_for_pfn(int nid, |
961 | struct memory_group *group, |
962 | unsigned long pfn, |
963 | unsigned long nr_pages) |
964 | { |
965 | unsigned long online_pages = 0, max_pages, end_pfn; |
966 | struct page *page; |
967 | |
968 | if (!auto_movable_ratio) |
969 | goto kernel_zone; |
970 | |
971 | if (group && !group->is_dynamic) { |
972 | max_pages = group->s.max_pages; |
973 | online_pages = group->present_movable_pages; |
974 | |
975 | /* If anything is !MOVABLE online the rest !MOVABLE. */ |
976 | if (group->present_kernel_pages) |
977 | goto kernel_zone; |
978 | } else if (!group || group->d.unit_pages == nr_pages) { |
979 | max_pages = nr_pages; |
980 | } else { |
981 | max_pages = group->d.unit_pages; |
982 | /* |
983 | * Take a look at all online sections in the current unit. |
984 | * We can safely assume that all pages within a section belong |
985 | * to the same zone, because dynamic memory groups only deal |
986 | * with hotplugged memory. |
987 | */ |
988 | pfn = ALIGN_DOWN(pfn, group->d.unit_pages); |
989 | end_pfn = pfn + group->d.unit_pages; |
990 | for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { |
991 | page = pfn_to_online_page(pfn); |
992 | if (!page) |
993 | continue; |
994 | /* If anything is !MOVABLE online the rest !MOVABLE. */ |
995 | if (!is_zone_movable_page(page)) |
996 | goto kernel_zone; |
997 | online_pages += PAGES_PER_SECTION; |
998 | } |
999 | } |
1000 | |
1001 | /* |
1002 | * Online MOVABLE if we could *currently* online all remaining parts |
1003 | * MOVABLE. We expect to (add+) online them immediately next, so if |
1004 | * nobody interferes, all will be MOVABLE if possible. |
1005 | */ |
1006 | nr_pages = max_pages - online_pages; |
1007 | if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages)) |
1008 | goto kernel_zone; |
1009 | |
1010 | #ifdef CONFIG_NUMA |
1011 | if (auto_movable_numa_aware && |
1012 | !auto_movable_can_online_movable(nid, group, nr_pages)) |
1013 | goto kernel_zone; |
1014 | #endif /* CONFIG_NUMA */ |
1015 | |
1016 | return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; |
1017 | kernel_zone: |
	return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
1019 | } |
1020 | |
1021 | static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, |
1022 | unsigned long nr_pages) |
1023 | { |
1024 | struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, |
1025 | nr_pages); |
1026 | struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; |
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
1029 | |
1030 | /* |
1031 | * We inherit the existing zone in a simple case where zones do not |
1032 | * overlap in the given range |
1033 | */ |
1034 | if (in_kernel ^ in_movable) |
1035 | return (in_kernel) ? kernel_zone : movable_zone; |
1036 | |
1037 | /* |
1038 | * If the range doesn't belong to any zone or two zones overlap in the |
1039 | * given range then we use movable zone only if movable_node is |
1040 | * enabled because we always online to a kernel zone by default. |
1041 | */ |
1042 | return movable_node_enabled ? movable_zone : kernel_zone; |
1043 | } |
1044 | |
1045 | struct zone *zone_for_pfn_range(int online_type, int nid, |
1046 | struct memory_group *group, unsigned long start_pfn, |
1047 | unsigned long nr_pages) |
1048 | { |
1049 | if (online_type == MMOP_ONLINE_KERNEL) |
1050 | return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages); |
1051 | |
1052 | if (online_type == MMOP_ONLINE_MOVABLE) |
1053 | return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; |
1054 | |
1055 | if (online_policy == ONLINE_POLICY_AUTO_MOVABLE) |
		return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
1057 | |
1058 | return default_zone_for_pfn(nid, start_pfn, nr_pages); |
1059 | } |
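/*
 * Illustrative mapping from the user-visible interface (not part of the
 * original file): writing "online_kernel", "online_movable" or "online" to
 * /sys/devices/system/memory/memoryN/state corresponds to
 * MMOP_ONLINE_KERNEL, MMOP_ONLINE_MOVABLE and MMOP_ONLINE respectively;
 * only the last one consults the online_policy handling above.
 */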
1060 | |
1061 | /* |
1062 | * This function should only be called by memory_block_{online,offline}, |
1063 | * and {online,offline}_pages. |
1064 | */ |
1065 | void adjust_present_page_count(struct page *page, struct memory_group *group, |
1066 | long nr_pages) |
1067 | { |
1068 | struct zone *zone = page_zone(page); |
1069 | const bool movable = zone_idx(zone) == ZONE_MOVABLE; |
1070 | |
1071 | /* |
1072 | * We only support onlining/offlining/adding/removing of complete |
	 * memory blocks; therefore, either all pages are early or all are
	 * hotplugged.
	 */
	if (early_section(__pfn_to_section(page_to_pfn(page))))
1076 | zone->present_early_pages += nr_pages; |
1077 | zone->present_pages += nr_pages; |
1078 | zone->zone_pgdat->node_present_pages += nr_pages; |
1079 | |
1080 | if (group && movable) |
1081 | group->present_movable_pages += nr_pages; |
1082 | else if (group && !movable) |
1083 | group->present_kernel_pages += nr_pages; |
1084 | } |
1085 | |
1086 | int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, |
1087 | struct zone *zone) |
1088 | { |
1089 | unsigned long end_pfn = pfn + nr_pages; |
1090 | int ret, i; |
1091 | |
1092 | ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); |
1093 | if (ret) |
1094 | return ret; |
1095 | |
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
1097 | |
1098 | for (i = 0; i < nr_pages; i++) |
1099 | SetPageVmemmapSelfHosted(pfn_to_page(pfn + i)); |
1100 | |
1101 | /* |
1102 | * It might be that the vmemmap_pages fully span sections. If that is |
1103 | * the case, mark those sections online here as otherwise they will be |
1104 | * left offline. |
1105 | */ |
1106 | if (nr_pages >= PAGES_PER_SECTION) |
		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1108 | |
1109 | return ret; |
1110 | } |
1111 | |
1112 | void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages) |
1113 | { |
1114 | unsigned long end_pfn = pfn + nr_pages; |
1115 | |
1116 | /* |
1117 | * It might be that the vmemmap_pages fully span sections. If that is |
1118 | * the case, mark those sections offline here as otherwise they will be |
1119 | * left online. |
1120 | */ |
1121 | if (nr_pages >= PAGES_PER_SECTION) |
		offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1123 | |
1124 | /* |
1125 | * The pages associated with this vmemmap have been offlined, so |
1126 | * we can reset its state here. |
1127 | */ |
	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
1129 | kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); |
1130 | } |
1131 | |
1132 | int __ref online_pages(unsigned long pfn, unsigned long nr_pages, |
1133 | struct zone *zone, struct memory_group *group) |
1134 | { |
1135 | unsigned long flags; |
1136 | int need_zonelists_rebuild = 0; |
1137 | const int nid = zone_to_nid(zone); |
1138 | int ret; |
1139 | struct memory_notify arg; |
1140 | |
1141 | /* |
1142 | * {on,off}lining is constrained to full memory sections (or more |
1143 | * precisely to memory blocks from the user space POV). |
1144 | * memmap_on_memory is an exception because it reserves initial part |
1145 | * of the physical memory space for vmemmaps. That space is pageblock |
1146 | * aligned. |
1147 | */ |
1148 | if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) || |
1149 | !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION))) |
1150 | return -EINVAL; |
1151 | |
1152 | mem_hotplug_begin(); |
1153 | |
1154 | /* associate pfn range with the zone */ |
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
1156 | |
1157 | arg.start_pfn = pfn; |
1158 | arg.nr_pages = nr_pages; |
	node_states_check_changes_online(nr_pages, zone, &arg);
1160 | |
	ret = memory_notify(MEM_GOING_ONLINE, &arg);
1162 | ret = notifier_to_errno(ret); |
1163 | if (ret) |
1164 | goto failed_addition; |
1165 | |
1166 | /* |
1167 | * Fixup the number of isolated pageblocks before marking the sections |
1168 | * onlining, such that undo_isolate_page_range() works correctly. |
1169 | */ |
1170 | spin_lock_irqsave(&zone->lock, flags); |
1171 | zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; |
	spin_unlock_irqrestore(&zone->lock, flags);
1173 | |
1174 | /* |
1175 | * If this zone is not populated, then it is not in zonelist. |
1176 | * This means the page allocator ignores this zone. |
1177 | * So, zonelist must be updated after online. |
1178 | */ |
1179 | if (!populated_zone(zone)) { |
1180 | need_zonelists_rebuild = 1; |
1181 | setup_zone_pageset(zone); |
1182 | } |
1183 | |
	online_pages_range(pfn, nr_pages);
1185 | adjust_present_page_count(pfn_to_page(pfn), group, nr_pages); |
1186 | |
	node_states_set_node(nid, &arg);
1188 | if (need_zonelists_rebuild) |
1189 | build_all_zonelists(NULL); |
1190 | |
1191 | /* Basic onlining is complete, allow allocation of onlined pages. */ |
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
1193 | |
1194 | /* |
1195 | * Freshly onlined pages aren't shuffled (e.g., all pages are placed to |
1196 | * the tail of the freelist when undoing isolation). Shuffle the whole |
1197 | * zone to make sure the just onlined pages are properly distributed |
1198 | * across the whole freelist - to create an initial shuffle. |
1199 | */ |
	shuffle_zone(zone);
1201 | |
1202 | /* reinitialise watermarks and update pcp limits */ |
1203 | init_per_zone_wmark_min(); |
1204 | |
1205 | kswapd_run(nid); |
1206 | kcompactd_run(nid); |
1207 | |
1208 | writeback_set_ratelimit(); |
1209 | |
	memory_notify(MEM_ONLINE, &arg);
1211 | mem_hotplug_done(); |
1212 | return 0; |
1213 | |
1214 | failed_addition: |
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1216 | (unsigned long long) pfn << PAGE_SHIFT, |
1217 | (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); |
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	remove_pfn_range_from_zone(zone, pfn, nr_pages);
1220 | mem_hotplug_done(); |
1221 | return ret; |
1222 | } |
1223 | |
1224 | /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ |
1225 | static pg_data_t __ref *hotadd_init_pgdat(int nid) |
1226 | { |
1227 | struct pglist_data *pgdat; |
1228 | |
1229 | /* |
1230 | * NODE_DATA is preallocated (free_area_init) but its internal |
1231 | * state is not allocated completely. Add missing pieces. |
1232 | * Completely offline nodes stay around and they just need |
	 * reinitialization.
1234 | */ |
1235 | pgdat = NODE_DATA(nid); |
1236 | |
1237 | /* init node's zones as empty zones, we don't have any present pages.*/ |
1238 | free_area_init_core_hotplug(pgdat); |
1239 | |
1240 | /* |
1241 | * The node we allocated has no zone fallback lists. For avoiding |
1242 | * to access not-initialized zonelist, build here. |
1243 | */ |
1244 | build_all_zonelists(pgdat); |
1245 | |
1246 | return pgdat; |
1247 | } |
1248 | |
1249 | /* |
1250 | * __try_online_node - online a node if offlined |
1251 | * @nid: the node ID |
1252 | * @set_node_online: Whether we want to online the node |
1253 | * called by cpu_up() to online a node without onlined memory. |
1254 | * |
1255 | * Returns: |
1256 | * 1 -> a new node has been allocated |
1257 | * 0 -> the node is already online |
1258 | * -ENOMEM -> the node could not be allocated |
1259 | */ |
1260 | static int __try_online_node(int nid, bool set_node_online) |
1261 | { |
1262 | pg_data_t *pgdat; |
1263 | int ret = 1; |
1264 | |
1265 | if (node_online(nid)) |
1266 | return 0; |
1267 | |
1268 | pgdat = hotadd_init_pgdat(nid); |
1269 | if (!pgdat) { |
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1271 | ret = -ENOMEM; |
1272 | goto out; |
1273 | } |
1274 | |
1275 | if (set_node_online) { |
1276 | node_set_online(nid); |
1277 | ret = register_one_node(nid); |
1278 | BUG_ON(ret); |
1279 | } |
1280 | out: |
1281 | return ret; |
1282 | } |
1283 | |
1284 | /* |
1285 | * Users of this function always want to online/register the node |
1286 | */ |
1287 | int try_online_node(int nid) |
1288 | { |
1289 | int ret; |
1290 | |
1291 | mem_hotplug_begin(); |
	ret = __try_online_node(nid, true);
1293 | mem_hotplug_done(); |
1294 | return ret; |
1295 | } |
1296 | |
1297 | static int check_hotplug_memory_range(u64 start, u64 size) |
1298 | { |
1299 | /* memory range must be block size aligned */ |
1300 | if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) || |
1301 | !IS_ALIGNED(size, memory_block_size_bytes())) { |
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
1303 | memory_block_size_bytes(), start, size); |
1304 | return -EINVAL; |
1305 | } |
1306 | |
1307 | return 0; |
1308 | } |
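/*
 * Example (illustrative, assuming a 128 MiB memory block size): adding
 * start = 0x100000000, size = 0x8000000 passes this check, whereas
 * size = 0x4000000 (64 MiB) or an unaligned start such as 0x100001000 is
 * rejected with -EINVAL.
 */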
1309 | |
1310 | static int online_memory_block(struct memory_block *mem, void *arg) |
1311 | { |
1312 | mem->online_type = mhp_default_online_type; |
	return device_online(&mem->dev);
1314 | } |
1315 | |
1316 | #ifndef arch_supports_memmap_on_memory |
1317 | static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size) |
1318 | { |
1319 | /* |
1320 | * As default, we want the vmemmap to span a complete PMD such that we |
1321 | * can map the vmemmap using a single PMD if supported by the |
1322 | * architecture. |
1323 | */ |
1324 | return IS_ALIGNED(vmemmap_size, PMD_SIZE); |
1325 | } |
1326 | #endif |
1327 | |
1328 | static bool mhp_supports_memmap_on_memory(unsigned long size) |
1329 | { |
1330 | unsigned long vmemmap_size = memory_block_memmap_size(); |
1331 | unsigned long memmap_pages = memory_block_memmap_on_memory_pages(); |
1332 | |
1333 | /* |
1334 | * Besides having arch support and the feature enabled at runtime, we |
1335 | * need a few more assumptions to hold true: |
1336 | * |
	 * a) We span a single memory block: memory onlining/offlining happens
1338 | * in memory block granularity. We don't want the vmemmap of online |
1339 | * memory blocks to reside on offline memory blocks. In the future, |
1340 | * we might want to support variable-sized memory blocks to make the |
1341 | * feature more versatile. |
1342 | * |
1343 | * b) The vmemmap pages span complete PMDs: We don't want vmemmap code |
1344 | * to populate memory from the altmap for unrelated parts (i.e., |
1345 | * other memory blocks) |
1346 | * |
1347 | * c) The vmemmap pages (and thereby the pages that will be exposed to |
1348 | * the buddy) have to cover full pageblocks: memory onlining/offlining |
1349 | * code requires applicable ranges to be page-aligned, for example, to |
1350 | * set the migratetypes properly. |
1351 | * |
1352 | * TODO: Although we have a check here to make sure that vmemmap pages |
1353 | * fully populate a PMD, it is not the right place to check for |
1354 | * this. A much better solution involves improving vmemmap code |
1355 | * to fallback to base pages when trying to populate vmemmap using |
1356 | * altmap as an alternative source of memory, and we do not exactly |
1357 | * populate a single PMD. |
1358 | */ |
1359 | if (!mhp_memmap_on_memory() || size != memory_block_size_bytes()) |
1360 | return false; |
1361 | |
1362 | /* |
1363 | * Make sure the vmemmap allocation is fully contained |
1364 | * so that we always allocate vmemmap memory from altmap area. |
1365 | */ |
1366 | if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE)) |
1367 | return false; |
1368 | |
1369 | /* |
1370 | * start pfn should be pageblock_nr_pages aligned for correctly |
1371 | * setting migrate types |
1372 | */ |
1373 | if (!pageblock_aligned(memmap_pages)) |
1374 | return false; |
1375 | |
1376 | if (memmap_pages == PHYS_PFN(memory_block_size_bytes())) |
1377 | /* No effective hotplugged memory doesn't make sense. */ |
1378 | return false; |
1379 | |
1380 | return arch_supports_memmap_on_memory(vmemmap_size); |
1381 | } |
1382 | |
1383 | /* |
1384 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug |
1385 | * and online/offline operations (triggered e.g. by sysfs). |
1386 | * |
1387 | * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG |
1388 | */ |
1389 | int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) |
1390 | { |
1391 | struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) }; |
1392 | enum memblock_flags memblock_flags = MEMBLOCK_NONE; |
1393 | struct vmem_altmap mhp_altmap = { |
1394 | .base_pfn = PHYS_PFN(res->start), |
1395 | .end_pfn = PHYS_PFN(res->end), |
1396 | }; |
1397 | struct memory_group *group = NULL; |
1398 | u64 start, size; |
1399 | bool new_node = false; |
1400 | int ret; |
1401 | |
1402 | start = res->start; |
1403 | size = resource_size(res); |
1404 | |
1405 | ret = check_hotplug_memory_range(start, size); |
1406 | if (ret) |
1407 | return ret; |
1408 | |
1409 | if (mhp_flags & MHP_NID_IS_MGID) { |
		group = memory_group_find_by_id(nid);
1411 | if (!group) |
1412 | return -EINVAL; |
1413 | nid = group->nid; |
1414 | } |
1415 | |
1416 | if (!node_possible(nid)) { |
		WARN(1, "node %d was absent from the node_possible_map\n", nid);
1418 | return -EINVAL; |
1419 | } |
1420 | |
1421 | mem_hotplug_begin(); |
1422 | |
1423 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { |
1424 | if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED) |
1425 | memblock_flags = MEMBLOCK_DRIVER_MANAGED; |
		ret = memblock_add_node(start, size, nid, memblock_flags);
1427 | if (ret) |
1428 | goto error_mem_hotplug_end; |
1429 | } |
1430 | |
	ret = __try_online_node(nid, false);
1432 | if (ret < 0) |
1433 | goto error; |
1434 | new_node = ret; |
1435 | |
1436 | /* |
1437 | * Self hosted memmap array |
1438 | */ |
1439 | if (mhp_flags & MHP_MEMMAP_ON_MEMORY) { |
1440 | if (mhp_supports_memmap_on_memory(size)) { |
1441 | mhp_altmap.free = memory_block_memmap_on_memory_pages(); |
			params.altmap = kmalloc(sizeof(struct vmem_altmap), GFP_KERNEL);
1443 | if (!params.altmap) { |
1444 | ret = -ENOMEM; |
1445 | goto error; |
1446 | } |
1447 | |
1448 | memcpy(params.altmap, &mhp_altmap, sizeof(mhp_altmap)); |
1449 | } |
1450 | /* fallback to not using altmap */ |
1451 | } |
1452 | |
1453 | /* call arch's memory hotadd */ |
	ret = arch_add_memory(nid, start, size, &params);
1455 | if (ret < 0) |
1456 | goto error_free; |
1457 | |
1458 | /* create memory block devices after memory was added */ |
	ret = create_memory_block_devices(start, size, params.altmap, group);
1460 | if (ret) { |
1461 | arch_remove_memory(start, size, NULL); |
1462 | goto error_free; |
1463 | } |
1464 | |
1465 | if (new_node) { |
1466 | /* If sysfs file of new node can't be created, cpu on the node |
1467 | * can't be hot-added. There is no rollback way now. |
1468 | * So, check by BUG_ON() to catch it reluctantly.. |
1469 | * We online node here. We can't roll back from here. |
1470 | */ |
1471 | node_set_online(nid); |
1472 | ret = __register_one_node(nid); |
1473 | BUG_ON(ret); |
1474 | } |
1475 | |
1476 | register_memory_blocks_under_node(nid, PFN_DOWN(start), |
1477 | PFN_UP(start + size - 1), |
					  MEMINIT_HOTPLUG);
1479 | |
1480 | /* create new memmap entry */ |
	if (!strcmp(res->name, "System RAM"))
		firmware_map_add_hotplug(start, start + size, "System RAM");
1483 | |
1484 | /* device_online() will take the lock when calling online_pages() */ |
1485 | mem_hotplug_done(); |
1486 | |
1487 | /* |
1488 | * In case we're allowed to merge the resource, flag it and trigger |
1489 | * merging now that adding succeeded. |
1490 | */ |
1491 | if (mhp_flags & MHP_MERGE_RESOURCE) |
1492 | merge_system_ram_resource(res); |
1493 | |
1494 | /* online pages if requested */ |
1495 | if (mhp_default_online_type != MMOP_OFFLINE) |
		walk_memory_blocks(start, size, NULL, online_memory_block);
1497 | |
1498 | return ret; |
1499 | error_free: |
	kfree(params.altmap);
1501 | error: |
1502 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) |
		memblock_remove(start, size);
1504 | error_mem_hotplug_end: |
1505 | mem_hotplug_done(); |
1506 | return ret; |
1507 | } |
1508 | |
1509 | /* requires device_hotplug_lock, see add_memory_resource() */ |
1510 | int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags) |
1511 | { |
1512 | struct resource *res; |
1513 | int ret; |
1514 | |
	res = register_memory_resource(start, size, "System RAM");
	if (IS_ERR(res))
		return PTR_ERR(res);
1518 | |
1519 | ret = add_memory_resource(nid, res, mhp_flags); |
1520 | if (ret < 0) |
1521 | release_memory_resource(res); |
1522 | return ret; |
1523 | } |
1524 | |
1525 | int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags) |
1526 | { |
1527 | int rc; |
1528 | |
1529 | lock_device_hotplug(); |
1530 | rc = __add_memory(nid, start, size, mhp_flags); |
1531 | unlock_device_hotplug(); |
1532 | |
1533 | return rc; |
1534 | } |
1535 | EXPORT_SYMBOL_GPL(add_memory); |
1536 | |
1537 | /* |
1538 | * Add special, driver-managed memory to the system as system RAM. Such |
1539 | * memory is not exposed via the raw firmware-provided memmap as system |
1540 | * RAM, instead, it is detected and added by a driver - during cold boot, |
1541 | * after a reboot, and after kexec. |
1542 | * |
1543 | * Reasons why this memory should not be used for the initial memmap of a |
1544 | * kexec kernel or for placing kexec images: |
1545 | * - The booting kernel is in charge of determining how this memory will be |
1546 | * used (e.g., use persistent memory as system RAM) |
1547 | * - Coordination with a hypervisor is required before this memory |
1548 | * can be used (e.g., inaccessible parts). |
1549 | * |
1550 | * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided |
1551 | * memory map") are created. Also, the created memory resource is flagged |
1552 | * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case |
1553 | * this memory as well (esp., not place kexec images onto it). |
1554 | * |
1555 | * The resource_name (visible via /proc/iomem) has to have the format |
1556 | * "System RAM ($DRIVER)". |
1557 | */ |
1558 | int add_memory_driver_managed(int nid, u64 start, u64 size, |
1559 | const char *resource_name, mhp_t mhp_flags) |
1560 | { |
1561 | struct resource *res; |
1562 | int rc; |
1563 | |
1564 | if (!resource_name || |
1565 | strstr(resource_name, "System RAM (" ) != resource_name || |
1566 | resource_name[strlen(resource_name) - 1] != ')') |
1567 | return -EINVAL; |
1568 | |
1569 | lock_device_hotplug(); |
1570 | |
1571 | res = register_memory_resource(start, size, resource_name); |
1572 | if (IS_ERR(res)) {
1573 | rc = PTR_ERR(res);
1574 | goto out_unlock; |
1575 | } |
1576 | |
1577 | rc = add_memory_resource(nid, res, mhp_flags); |
1578 | if (rc < 0) |
1579 | release_memory_resource(res); |
1580 | |
1581 | out_unlock: |
1582 | unlock_device_hotplug(); |
1583 | return rc; |
1584 | } |
1585 | EXPORT_SYMBOL_GPL(add_memory_driver_managed); |
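/*
 * Example (illustrative sketch only): a hypothetical driver "foo" must
 * pick a resource name in the required "System RAM ($DRIVER)" format;
 * nid, start and size are assumed to be known to the driver, and
 * MHP_NONE (no special hotplug flags) is assumed here:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (foo)", MHP_NONE);
 */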
1586 | |
1587 | /* |
1588 | * Platforms should define arch_get_mappable_range() that provides |
1589 | * maximum possible addressable physical memory range for which the |
1590 | * linear mapping could be created. The platform-returned address
1591 | * range must adhere to the following semantics:
1592 | * |
1593 | * - range.start <= range.end |
1594 | * - Range includes both end points [range.start..range.end] |
1595 | * |
1596 | * There is also a fallback definition provided here, allowing the |
1597 | * entire possible physical address range in case any platform does |
1598 | * not define arch_get_mappable_range(). |
1599 | */ |
1600 | struct range __weak arch_get_mappable_range(void) |
1601 | { |
1602 | struct range mhp_range = { |
1603 | .start = 0UL, |
1604 | .end = -1ULL, |
1605 | }; |
1606 | return mhp_range; |
1607 | } |
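/*
 * A platform override simply replaces the weak fallback above. Minimal
 * sketch (hypothetical; ARCH_LINEAR_MAP_END is an assumed per-arch
 * constant, not something defined in this file):
 *
 *	struct range arch_get_mappable_range(void)
 *	{
 *		return (struct range) {
 *			.start = 0,
 *			.end = ARCH_LINEAR_MAP_END - 1,
 *		};
 *	}
 */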
1608 | |
1609 | struct range mhp_get_pluggable_range(bool need_mapping) |
1610 | { |
1611 | const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; |
1612 | struct range mhp_range; |
1613 | |
1614 | if (need_mapping) { |
1615 | mhp_range = arch_get_mappable_range(); |
1616 | if (mhp_range.start > max_phys) { |
1617 | mhp_range.start = 0; |
1618 | mhp_range.end = 0; |
1619 | } |
1620 | mhp_range.end = min_t(u64, mhp_range.end, max_phys); |
1621 | } else { |
1622 | mhp_range.start = 0; |
1623 | mhp_range.end = max_phys; |
1624 | } |
1625 | return mhp_range; |
1626 | } |
1627 | EXPORT_SYMBOL_GPL(mhp_get_pluggable_range); |
1628 | |
1629 | bool mhp_range_allowed(u64 start, u64 size, bool need_mapping) |
1630 | { |
1631 | struct range mhp_range = mhp_get_pluggable_range(need_mapping); |
1632 | u64 end = start + size; |
1633 | |
1634 | if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end) |
1635 | return true; |
1636 | |
1637 | pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n" , |
1638 | start, end, mhp_range.start, mhp_range.end); |
1639 | return false; |
1640 | } |
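/*
 * Typical caller-side use (sketch): before creating a linear mapping for
 * a to-be-added range, a caller can reject ranges the architecture cannot
 * map; start and size are whatever the caller is about to hot-add, and
 * returning -ERANGE is just an example choice:
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -ERANGE;
 */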
1641 | |
1642 | #ifdef CONFIG_MEMORY_HOTREMOVE |
1643 | /* |
1644 | * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, |
1645 | * non-lru movable pages and hugepages). Will skip over most unmovable |
1646 | * pages (esp., pages that can be skipped when offlining), but bail out on |
1647 | * definitely unmovable pages. |
1648 | * |
1649 | * Returns: |
1650 | * 0 in case a movable page is found and movable_pfn was updated. |
1651 | * -ENOENT in case no movable page was found. |
1652 | * -EBUSY in case a definitely unmovable page was found. |
1653 | */ |
1654 | static int scan_movable_pages(unsigned long start, unsigned long end, |
1655 | unsigned long *movable_pfn) |
1656 | { |
1657 | unsigned long pfn; |
1658 | |
1659 | for (pfn = start; pfn < end; pfn++) { |
1660 | struct page *page, *head; |
1661 | unsigned long skip; |
1662 | |
1663 | if (!pfn_valid(pfn)) |
1664 | continue; |
1665 | page = pfn_to_page(pfn); |
1666 | if (PageLRU(page)) |
1667 | goto found; |
1668 | if (__PageMovable(page)) |
1669 | goto found; |
1670 | |
1671 | /* |
1672 | * PageOffline() pages that are not marked __PageMovable() and |
1673 | * have a reference count > 0 (after MEM_GOING_OFFLINE) are |
1674 | * definitely unmovable. If their reference count would be 0, |
1675 | * they could at least be skipped when offlining memory. |
1676 | */ |
1677 | if (PageOffline(page) && page_count(page)) |
1678 | return -EBUSY; |
1679 | |
1680 | if (!PageHuge(page)) |
1681 | continue; |
1682 | head = compound_head(page); |
1683 | /* |
1684 | * This test is racy as we hold no reference or lock. The |
1685 | * hugetlb page could have been freed and head is no longer
1686 | * a hugetlb page before the following check. In such unlikely |
1687 | * cases false positives and negatives are possible. Calling |
1688 | * code must deal with these scenarios. |
1689 | */ |
1690 | if (HPageMigratable(head))
1691 | goto found; |
1692 | skip = compound_nr(head) - (pfn - page_to_pfn(head));
1693 | pfn += skip - 1; |
1694 | } |
1695 | return -ENOENT; |
1696 | found: |
1697 | *movable_pfn = pfn; |
1698 | return 0; |
1699 | } |
1700 | |
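/*
 * Migrate or unmap all pages in [start_pfn, end_pfn) that are still in
 * use: hugetlb and ordinary LRU/movable pages are isolated and migrated
 * to another node (or back to this node if it is the only one with
 * memory), while hwpoisoned pages are only unmapped. Isolation and
 * migration failures are reported (ratelimited) and the affected pages
 * are left in place for the caller to retry.
 */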
1701 | static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) |
1702 | { |
1703 | unsigned long pfn; |
1704 | struct page *page, *head; |
1705 | LIST_HEAD(source); |
1706 | static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL, |
1707 | DEFAULT_RATELIMIT_BURST); |
1708 | |
1709 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
1710 | struct folio *folio; |
1711 | bool isolated; |
1712 | |
1713 | if (!pfn_valid(pfn)) |
1714 | continue; |
1715 | page = pfn_to_page(pfn); |
1716 | folio = page_folio(page); |
1717 | head = &folio->page; |
1718 | |
1719 | if (PageHuge(page)) { |
1720 | pfn = page_to_pfn(head) + compound_nr(head) - 1;
1721 | isolate_hugetlb(folio, &source);
1722 | continue; |
1723 | } else if (PageTransHuge(page)) |
1724 | pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; |
1725 | |
1726 | /* |
1727 | * HWPoison pages have elevated reference counts so the migration would |
1728 | * fail on them. It also doesn't make any sense to migrate them in the |
1729 | * first place. Still try to unmap such a page in case it is still mapped |
1730 | * (e.g. the current hwpoison implementation doesn't unmap KSM pages but keeps
1731 | * the unmap as the catch-all safety net).
1732 | */ |
1733 | if (PageHWPoison(page)) { |
1734 | if (WARN_ON(folio_test_lru(folio))) |
1735 | folio_isolate_lru(folio); |
1736 | if (folio_mapped(folio)) |
1737 | try_to_unmap(folio, TTU_IGNORE_MLOCK);
1738 | continue; |
1739 | } |
1740 | |
1741 | if (!get_page_unless_zero(page)) |
1742 | continue; |
1743 | /* |
1744 | * We can skip free pages. We can also deal with pages on the
1745 | * LRU and with non-lru movable pages.
1746 | */ |
1747 | if (PageLRU(page)) |
1748 | isolated = isolate_lru_page(page); |
1749 | else |
1750 | isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE); |
1751 | if (isolated) { |
1752 | list_add_tail(&page->lru, &source);
1753 | if (!__PageMovable(page)) |
1754 | inc_node_page_state(page, NR_ISOLATED_ANON + |
1755 | page_is_file_lru(page)); |
1756 | |
1757 | } else { |
1758 | if (__ratelimit(&migrate_rs)) { |
1759 | pr_warn("failed to isolate pfn %lx\n" , pfn); |
1760 | dump_page(page, reason: "isolation failed" ); |
1761 | } |
1762 | } |
1763 | put_page(page); |
1764 | } |
1765 | if (!list_empty(&source)) {
1766 | nodemask_t nmask = node_states[N_MEMORY]; |
1767 | struct migration_target_control mtc = { |
1768 | .nmask = &nmask, |
1769 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, |
1770 | }; |
1771 | int ret; |
1772 | |
1773 | /* |
1774 | * We have checked that the migration range is within a single zone, so
1775 | * we can use the nid of the first page for all the others.
1776 | */ |
1777 | mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); |
1778 | |
1779 | /* |
1780 | * Try to allocate from a different node, but reuse this node
1781 | * if there are no other online nodes to be used (e.g. we are |
1782 | * offlining a part of the only existing node) |
1783 | */ |
1784 | node_clear(mtc.nid, nmask); |
1785 | if (nodes_empty(nmask)) |
1786 | node_set(mtc.nid, nmask); |
1787 | ret = migrate_pages(&source, alloc_migration_target, NULL,
1788 | (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
1789 | if (ret) { |
1790 | list_for_each_entry(page, &source, lru) { |
1791 | if (__ratelimit(&migrate_rs)) { |
1792 | pr_warn("migrating pfn %lx failed ret:%d\n" , |
1793 | page_to_pfn(page), ret); |
1794 | dump_page(page, "migration failure");
1795 | } |
1796 | } |
1797 | putback_movable_pages(l: &source); |
1798 | } |
1799 | } |
1800 | } |
1801 | |
1802 | static int __init cmdline_parse_movable_node(char *p) |
1803 | { |
1804 | movable_node_enabled = true; |
1805 | return 0; |
1806 | } |
1807 | early_param("movable_node" , cmdline_parse_movable_node); |
1808 | |
1809 | /* Check which states in node_states will change when offlining memory. */
1810 | static void node_states_check_changes_offline(unsigned long nr_pages, |
1811 | struct zone *zone, struct memory_notify *arg) |
1812 | { |
1813 | struct pglist_data *pgdat = zone->zone_pgdat; |
1814 | unsigned long present_pages = 0; |
1815 | enum zone_type zt; |
1816 | |
1817 | arg->status_change_nid = NUMA_NO_NODE; |
1818 | arg->status_change_nid_normal = NUMA_NO_NODE; |
1819 | |
1820 | /* |
1821 | * Check whether node_states[N_NORMAL_MEMORY] will be changed. |
1822 | * If the memory to be offline is within the range |
1823 | * [0..ZONE_NORMAL], and it is the last present memory there, |
1824 | * the zones in that range will become empty after the offlining, |
1825 | * thus we can determine that we need to clear the node from |
1826 | * node_states[N_NORMAL_MEMORY]. |
1827 | */ |
1828 | for (zt = 0; zt <= ZONE_NORMAL; zt++) |
1829 | present_pages += pgdat->node_zones[zt].present_pages; |
1830 | if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) |
1831 | arg->status_change_nid_normal = zone_to_nid(zone); |
1832 | |
1833 | /* |
1834 | * We have accounted the pages from [0..ZONE_NORMAL); ZONE_HIGHMEM |
1835 | * does not apply as we don't support 32bit. |
1836 | * Here we count the possible pages from ZONE_MOVABLE. |
1837 | * If, after having accounted for all the pages, we see that nr_pages
1838 | * to be offlined is greater than or equal to the accounted pages,
1839 | * we know that the node will become empty, and so we can clear
1840 | * it for N_MEMORY as well. |
1841 | */ |
1842 | present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages; |
1843 | |
1844 | if (nr_pages >= present_pages) |
1845 | arg->status_change_nid = zone_to_nid(zone); |
1846 | } |
1847 | |
1848 | static void node_states_clear_node(int node, struct memory_notify *arg) |
1849 | { |
1850 | if (arg->status_change_nid_normal >= 0) |
1851 | node_clear_state(node, N_NORMAL_MEMORY);
1852 | |
1853 | if (arg->status_change_nid >= 0) |
1854 | node_clear_state(node, N_MEMORY);
1855 | } |
1856 | |
1857 | static int count_system_ram_pages_cb(unsigned long start_pfn, |
1858 | unsigned long nr_pages, void *data) |
1859 | { |
1860 | unsigned long *nr_system_ram_pages = data; |
1861 | |
1862 | *nr_system_ram_pages += nr_pages; |
1863 | return 0; |
1864 | } |
1865 | |
1866 | int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, |
1867 | struct zone *zone, struct memory_group *group) |
1868 | { |
1869 | const unsigned long end_pfn = start_pfn + nr_pages; |
1870 | unsigned long pfn, system_ram_pages = 0; |
1871 | const int node = zone_to_nid(zone); |
1872 | unsigned long flags; |
1873 | struct memory_notify arg; |
1874 | char *reason; |
1875 | int ret; |
1876 | |
1877 | /* |
1878 | * {on,off}lining is constrained to full memory sections (or more |
1879 | * precisely to memory blocks from the user space POV). |
1880 | * memmap_on_memory is an exception because it reserves the initial part
1881 | * of the physical memory space for vmemmaps. That space is pageblock |
1882 | * aligned. |
1883 | */ |
1884 | if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) || |
1885 | !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) |
1886 | return -EINVAL; |
1887 | |
1888 | mem_hotplug_begin(); |
1889 | |
1890 | /* |
1891 | * Don't allow offlining memory blocks that contain holes.
1892 | * Consequently, memory blocks with holes can never get onlined |
1893 | * via the hotplug path - online_pages() - as hotplugged memory has |
1894 | * no holes. This way, we e.g., don't have to worry about marking |
1895 | * memory holes PG_reserved, don't need pfn_valid() checks, and can |
1896 | * avoid using walk_system_ram_range() later. |
1897 | */ |
1898 | walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
1899 | count_system_ram_pages_cb);
1900 | if (system_ram_pages != nr_pages) { |
1901 | ret = -EINVAL; |
1902 | reason = "memory holes" ; |
1903 | goto failed_removal; |
1904 | } |
1905 | |
1906 | /* |
1907 | * We only support offlining of memory blocks managed by a single zone, |
1908 | * checked by calling code. This is just a sanity check that we might |
1909 | * want to remove in the future. |
1910 | */ |
1911 | if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone || |
1912 | page_zone(pfn_to_page(end_pfn - 1)) != zone)) { |
1913 | ret = -EINVAL; |
1914 | reason = "multizone range" ; |
1915 | goto failed_removal; |
1916 | } |
1917 | |
1918 | /* |
1919 | * Disable pcplists so that page isolation cannot race with freeing |
1920 | * in a way that pages from isolated pageblock are left on pcplists. |
1921 | */ |
1922 | zone_pcp_disable(zone); |
1923 | lru_cache_disable(); |
1924 | |
1925 | /* set above range as isolated */ |
1926 | ret = start_isolate_page_range(start_pfn, end_pfn,
1927 | MIGRATE_MOVABLE,
1928 | MEMORY_OFFLINE | REPORT_FAILURE,
1929 | GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL);
1930 | if (ret) { |
1931 | reason = "failure to isolate range" ; |
1932 | goto failed_removal_pcplists_disabled; |
1933 | } |
1934 | |
1935 | arg.start_pfn = start_pfn; |
1936 | arg.nr_pages = nr_pages; |
1937 | node_states_check_changes_offline(nr_pages, zone, &arg);
1938 | |
1939 | ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1940 | ret = notifier_to_errno(ret); |
1941 | if (ret) { |
1942 | reason = "notifier failure" ; |
1943 | goto failed_removal_isolated; |
1944 | } |
1945 | |
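/*
 * Repeatedly scan the range, migrating whatever is still movable, until
 * test_pages_isolated() confirms that no pages (other than those that
 * may be skipped when offlining) remain in the isolated range. The loop
 * bails out on signals, on definitely unmovable pages and on failure to
 * dissolve free huge pages.
 */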
1946 | do { |
1947 | pfn = start_pfn; |
1948 | do { |
1949 | /* |
1950 | * Historically we always checked for any signal and |
1951 | * can't limit it to fatal signals without eventually |
1952 | * breaking user space. |
1953 | */ |
1954 | if (signal_pending(current)) { |
1955 | ret = -EINTR; |
1956 | reason = "signal backoff" ; |
1957 | goto failed_removal_isolated; |
1958 | } |
1959 | |
1960 | cond_resched(); |
1961 | |
1962 | ret = scan_movable_pages(pfn, end_pfn, &pfn);
1963 | if (!ret) { |
1964 | /* |
1965 | * TODO: fatal migration failures should bail |
1966 | * out |
1967 | */ |
1968 | do_migrate_range(pfn, end_pfn);
1969 | } |
1970 | } while (!ret); |
1971 | |
1972 | if (ret != -ENOENT) { |
1973 | reason = "unmovable page" ; |
1974 | goto failed_removal_isolated; |
1975 | } |
1976 | |
1977 | /* |
1978 | * Dissolve free hugepages in the memory block before actually
1979 | * offlining, in order to keep hugetlbfs's object counting
1980 | * consistent.
1981 | */ |
1982 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); |
1983 | if (ret) { |
1984 | reason = "failure to dissolve huge pages" ; |
1985 | goto failed_removal_isolated; |
1986 | } |
1987 | |
1988 | ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); |
1989 | |
1990 | } while (ret); |
1991 | |
1992 | /* Mark all sections offline and remove free pages from the buddy. */ |
1993 | __offline_isolated_pages(start_pfn, end_pfn); |
1994 | pr_debug("Offlined Pages %ld\n" , nr_pages); |
1995 | |
1996 | /* |
1997 | * The memory sections are marked offline, and the pageblock flags are
1998 | * effectively stale; nobody should be touching them. Fix up the number
1999 | * of isolated pageblocks, memory onlining will properly revert this. |
2000 | */ |
2001 | spin_lock_irqsave(&zone->lock, flags); |
2002 | zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; |
2003 | spin_unlock_irqrestore(&zone->lock, flags);
2004 | |
2005 | lru_cache_enable(); |
2006 | zone_pcp_enable(zone); |
2007 | |
2008 | /* removal success */ |
2009 | adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
2010 | adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
2011 | |
2012 | /* reinitialise watermarks and update pcp limits */ |
2013 | init_per_zone_wmark_min(); |
2014 | |
2015 | /* |
2016 | * Make sure to mark the node as memory-less before rebuilding the zone |
2017 | * list. Otherwise this node would still appear in the fallback lists. |
2018 | */ |
2019 | node_states_clear_node(node, &arg);
2020 | if (!populated_zone(zone)) { |
2021 | zone_pcp_reset(zone); |
2022 | build_all_zonelists(NULL); |
2023 | } |
2024 | |
2025 | if (arg.status_change_nid >= 0) { |
2026 | kcompactd_stop(node);
2027 | kswapd_stop(node);
2028 | } |
2029 | |
2030 | writeback_set_ratelimit(); |
2031 | |
2032 | memory_notify(MEM_OFFLINE, &arg);
2033 | remove_pfn_range_from_zone(zone, start_pfn, nr_pages); |
2034 | mem_hotplug_done(); |
2035 | return 0; |
2036 | |
2037 | failed_removal_isolated: |
2038 | /* pushback to free area */ |
2039 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
2040 | memory_notify(MEM_CANCEL_OFFLINE, &arg);
2041 | failed_removal_pcplists_disabled: |
2042 | lru_cache_enable(); |
2043 | zone_pcp_enable(zone); |
2044 | failed_removal: |
2045 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n" , |
2046 | (unsigned long long) start_pfn << PAGE_SHIFT, |
2047 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
2048 | reason); |
2049 | mem_hotplug_done(); |
2050 | return ret; |
2051 | } |
2052 | |
2053 | static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) |
2054 | { |
2055 | int *nid = arg; |
2056 | |
2057 | *nid = mem->nid; |
2058 | if (unlikely(mem->state != MEM_OFFLINE)) { |
2059 | phys_addr_t beginpa, endpa; |
2060 | |
2061 | beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); |
2062 | endpa = beginpa + memory_block_size_bytes() - 1; |
2063 | pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n" , |
2064 | &beginpa, &endpa); |
2065 | |
2066 | return -EBUSY; |
2067 | } |
2068 | return 0; |
2069 | } |
2070 | |
2071 | static int test_has_altmap_cb(struct memory_block *mem, void *arg) |
2072 | { |
2073 | struct memory_block **mem_ptr = (struct memory_block **)arg; |
2074 | /* |
2075 | * return the memblock if we have altmap |
2076 | * and break callback. |
2077 | */ |
2078 | if (mem->altmap) { |
2079 | *mem_ptr = mem; |
2080 | return 1; |
2081 | } |
2082 | return 0; |
2083 | } |
2084 | |
2085 | static int check_cpu_on_node(int nid) |
2086 | { |
2087 | int cpu; |
2088 | |
2089 | for_each_present_cpu(cpu) { |
2090 | if (cpu_to_node(cpu) == nid) |
2091 | /* |
2092 | * A CPU on this node hasn't been removed, so we can't
2093 | * offline this node.
2094 | */ |
2095 | return -EBUSY; |
2096 | } |
2097 | |
2098 | return 0; |
2099 | } |
2100 | |
2101 | static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg) |
2102 | { |
2103 | int nid = *(int *)arg; |
2104 | |
2105 | /* |
2106 | * If a memory block belongs to multiple nodes, the stored nid is not |
2107 | * reliable. However, such blocks are always online (e.g., cannot get |
2108 | * offlined) and, therefore, are still spanned by the node. |
2109 | */ |
2110 | return mem->nid == nid ? -EEXIST : 0; |
2111 | } |
2112 | |
2113 | /** |
2114 | * try_offline_node |
2115 | * @nid: the node ID |
2116 | * |
2117 | * Offline a node if all memory sections and cpus of the node are removed. |
2118 | * |
2119 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug |
2120 | * and online/offline operations before this call. |
2121 | */ |
2122 | void try_offline_node(int nid) |
2123 | { |
2124 | int rc; |
2125 | |
2126 | /* |
2127 | * If the node still spans pages (especially ZONE_DEVICE), don't |
2128 | * offline it. A node spans memory after move_pfn_range_to_zone(), |
2129 | * e.g., after the memory block was onlined. |
2130 | */ |
2131 | if (node_spanned_pages(nid)) |
2132 | return; |
2133 | |
2134 | /* |
2135 | * Especially offline memory blocks might not be spanned by the |
2136 | * node. They will get spanned by the node once they get onlined. |
2137 | * However, they link to the node in sysfs and can get onlined later. |
2138 | */ |
2139 | rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
2140 | if (rc) |
2141 | return; |
2142 | |
2143 | if (check_cpu_on_node(nid)) |
2144 | return; |
2145 | |
2146 | /* |
2147 | * All memory and CPUs of this node have been removed, so we can
2148 | * offline this node now.
2149 | */ |
2150 | node_set_offline(nid); |
2151 | unregister_one_node(nid); |
2152 | } |
2153 | EXPORT_SYMBOL(try_offline_node); |
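/*
 * Example (sketch): the memory-remove path in this file uses it roughly
 * like this, under the required lock:
 *
 *	lock_device_hotplug();
 *	...remove the node's memory and CPUs...
 *	try_offline_node(nid);
 *	unlock_device_hotplug();
 */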
2154 | |
2155 | static int __ref try_remove_memory(u64 start, u64 size) |
2156 | { |
2157 | struct memory_block *mem; |
2158 | int rc = 0, nid = NUMA_NO_NODE; |
2159 | struct vmem_altmap *altmap = NULL; |
2160 | |
2161 | BUG_ON(check_hotplug_memory_range(start, size)); |
2162 | |
2163 | /* |
2164 | * All memory blocks must be offlined before removing memory. Check |
2165 | * whether all memory blocks in question are offline and return error |
2166 | * if this is not the case. |
2167 | * |
2168 | * While at it, determine the nid. Note that if we'd have mixed nodes, |
2169 | * we'd only try to offline the last determined one -- which is good |
2170 | * enough for the cases we care about. |
2171 | */ |
2172 | rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
2173 | if (rc) |
2174 | return rc; |
2175 | |
2176 | /* |
2177 | * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in |
2178 | * the same granularity it was added - a single memory block. |
2179 | */ |
2180 | if (mhp_memmap_on_memory()) { |
2181 | rc = walk_memory_blocks(start, size, &mem, test_has_altmap_cb);
2182 | if (rc) { |
2183 | if (size != memory_block_size_bytes()) { |
2184 | pr_warn("Refuse to remove %#llx - %#llx," |
2185 | "wrong granularity\n" , |
2186 | start, start + size); |
2187 | return -EINVAL; |
2188 | } |
2189 | altmap = mem->altmap; |
2190 | /* |
2191 | * Mark altmap NULL so that we can add a debug |
2192 | * check on memblock free. |
2193 | */ |
2194 | mem->altmap = NULL; |
2195 | } |
2196 | } |
2197 | |
2198 | /* remove memmap entry */ |
2199 | firmware_map_remove(start, start + size, "System RAM");
2200 | |
2201 | /* |
2202 | * Memory block device removal under the device_hotplug_lock is |
2203 | * a barrier against racing online attempts. |
2204 | */ |
2205 | remove_memory_block_devices(start, size); |
2206 | |
2207 | mem_hotplug_begin(); |
2208 | |
2209 | arch_remove_memory(start, size, altmap); |
2210 | |
2211 | /* Verify that all vmemmap pages have actually been freed. */ |
2212 | if (altmap) { |
2213 | WARN(altmap->alloc, "Altmap not fully unmapped");
2214 | kfree(altmap);
2215 | } |
2216 | |
2217 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { |
2218 | memblock_phys_free(start, size);
2219 | memblock_remove(start, size);
2220 | } |
2221 | |
2222 | release_mem_region_adjustable(start, size); |
2223 | |
2224 | if (nid != NUMA_NO_NODE) |
2225 | try_offline_node(nid); |
2226 | |
2227 | mem_hotplug_done(); |
2228 | return 0; |
2229 | } |
2230 | |
2231 | /** |
2232 | * __remove_memory - Remove memory if every memory block is offline |
2233 | * @start: physical address of the region to remove |
2234 | * @size: size of the region to remove |
2235 | * |
2236 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug |
2237 | * and online/offline operations before this call, as required by |
2238 | * try_offline_node(). |
2239 | */ |
2240 | void __remove_memory(u64 start, u64 size) |
2241 | { |
2242 | |
2243 | /* |
2244 | * trigger BUG() if some memory is not offlined prior to calling this |
2245 | * function |
2246 | */ |
2247 | if (try_remove_memory(start, size)) |
2248 | BUG(); |
2249 | } |
2250 | |
2251 | /* |
2252 | * Remove memory if every memory block is offline, otherwise return -EBUSY if
2253 | * some memory is not offline.
2254 | */ |
2255 | int remove_memory(u64 start, u64 size) |
2256 | { |
2257 | int rc; |
2258 | |
2259 | lock_device_hotplug(); |
2260 | rc = try_remove_memory(start, size); |
2261 | unlock_device_hotplug(); |
2262 | |
2263 | return rc; |
2264 | } |
2265 | EXPORT_SYMBOL_GPL(remove_memory); |
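/*
 * Example (sketch): a caller that previously hot-added a range and has
 * made sure all of its memory blocks are offline (e.g. via sysfs or
 * device_offline()) can drop it again with:
 *
 *	rc = remove_memory(start, size);
 *	if (rc == -EBUSY)
 *		...some memory block is still online...
 */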
2266 | |
2267 | static int try_offline_memory_block(struct memory_block *mem, void *arg) |
2268 | { |
2269 | uint8_t online_type = MMOP_ONLINE_KERNEL; |
2270 | uint8_t **online_types = arg; |
2271 | struct page *page; |
2272 | int rc; |
2273 | |
2274 | /* |
2275 | * Sense the online_type via the zone of the memory block. Offlining |
2276 | * with multiple zones within one memory block will be rejected |
2277 | * by offlining code ... so we don't care about that. |
2278 | */ |
2279 | page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
2280 | if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE) |
2281 | online_type = MMOP_ONLINE_MOVABLE; |
2282 | |
2283 | rc = device_offline(&mem->dev);
2284 | /* |
2285 | * Default is MMOP_OFFLINE - change it only if offlining succeeded, |
2286 | * so try_reonline_memory_block() can do the right thing. |
2287 | */ |
2288 | if (!rc) |
2289 | **online_types = online_type; |
2290 | |
2291 | (*online_types)++; |
2292 | /* Ignore if already offline. */ |
2293 | return rc < 0 ? rc : 0; |
2294 | } |
2295 | |
2296 | static int try_reonline_memory_block(struct memory_block *mem, void *arg) |
2297 | { |
2298 | uint8_t **online_types = arg; |
2299 | int rc; |
2300 | |
2301 | if (**online_types != MMOP_OFFLINE) { |
2302 | mem->online_type = **online_types; |
2303 | rc = device_online(&mem->dev);
2304 | if (rc < 0) |
2305 | pr_warn("%s: Failed to re-online memory: %d" , |
2306 | __func__, rc); |
2307 | } |
2308 | |
2309 | /* Continue processing all remaining memory blocks. */ |
2310 | (*online_types)++; |
2311 | return 0; |
2312 | } |
2313 | |
2314 | /* |
2315 | * Try to offline and remove memory. Might take a long time to finish in case |
2316 | * memory is still in use. Primarily useful for memory devices that logically |
2317 | * unplugged all memory (so it's no longer in use) and want to offline + remove |
2318 | * that memory. |
2319 | */ |
2320 | int offline_and_remove_memory(u64 start, u64 size) |
2321 | { |
2322 | const unsigned long mb_count = size / memory_block_size_bytes(); |
2323 | uint8_t *online_types, *tmp; |
2324 | int rc; |
2325 | |
2326 | if (!IS_ALIGNED(start, memory_block_size_bytes()) || |
2327 | !IS_ALIGNED(size, memory_block_size_bytes()) || !size) |
2328 | return -EINVAL; |
2329 | |
2330 | /* |
2331 | * We'll remember the old online type of each memory block, so we can |
2332 | * try to revert whatever we did when offlining one memory block fails |
2333 | * after offlining some others succeeded. |
2334 | */ |
2335 | online_types = kmalloc_array(mb_count, sizeof(*online_types),
2336 | GFP_KERNEL); |
2337 | if (!online_types) |
2338 | return -ENOMEM; |
2339 | /* |
2340 | * Initialize all states to MMOP_OFFLINE, so when we abort processing in |
2341 | * try_offline_memory_block(), we'll skip all unprocessed blocks in |
2342 | * try_reonline_memory_block(). |
2343 | */ |
2344 | memset(online_types, MMOP_OFFLINE, mb_count); |
2345 | |
2346 | lock_device_hotplug(); |
2347 | |
2348 | tmp = online_types; |
2349 | rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);
2350 | |
2351 | /* |
2352 | * If we succeeded in offlining all memory, remove it.
2353 | * This cannot fail as it cannot get onlined in the meantime. |
2354 | */ |
2355 | if (!rc) { |
2356 | rc = try_remove_memory(start, size); |
2357 | if (rc) |
2358 | pr_err("%s: Failed to remove memory: %d" , __func__, rc); |
2359 | } |
2360 | |
2361 | /* |
2362 | * Roll back what we did. While memory onlining might theoretically fail
2363 | * (nacked by a notifier), it barely ever happens. |
2364 | */ |
2365 | if (rc) { |
2366 | tmp = online_types; |
2367 | walk_memory_blocks(start, size, &tmp,
2368 | try_reonline_memory_block);
2369 | } |
2370 | unlock_device_hotplug(); |
2371 | |
2372 | kfree(online_types);
2373 | return rc; |
2374 | } |
2375 | EXPORT_SYMBOL_GPL(offline_and_remove_memory); |
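/*
 * Example (sketch): a memory device driver (virtio-mem style) that has
 * logically unplugged an aligned chunk could hand it back like this;
 * start and size must be aligned to memory_block_size_bytes():
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc)
 *		...the chunk is still (partially) in use, try again later...
 */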
2376 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
2377 | |