// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return ((unsigned long)nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
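
/*
 * Illustrative note (not part of the original source): the encoding
 * above simply shifts the nid over the SECTION_* flag bits. For
 * example, memory_present() on node 2 stores (2UL << SECTION_NID_SHIFT)
 * in section_mem_map and sparse_early_nid() shifts it back down to 2,
 * while the low bits remain free for SECTION_MARKED_PRESENT and
 * SECTION_IS_ONLINE until the real mem_map replaces this temporary
 * value.
 */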

/* Validate the physical addressing limitations of the model */
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void __section_mark_present(struct mem_section *ms,
		unsigned long section_nr)
{
	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     section_nr != -1;					\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
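
/*
 * Usage sketch (illustrative only, not part of the original file):
 * walking the present sections instead of all NR_MEM_SECTIONS, e.g.
 * to count them:
 *
 *	unsigned long nr, nr_present = 0;
 *
 *	for_each_present_section_nr(0, nr)
 *		nr_present++;
 *
 * next_present_section_nr() returns -1 once it passes
 * __highest_present_section_nr, so the walk terminates early on
 * sparsely populated physical address spaces.
 */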

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif
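
/*
 * For a sense of scale (illustrative, architecture dependent): on
 * x86-64 a section is 128 MiB (SECTION_SIZE_BITS == 27) and a
 * subsection is 2 MiB, so SUBSECTIONS_PER_SECTION == 64 and each
 * section's subsection_map fits in a single 64-bit word, with every
 * bit covering PAGES_PER_SUBSECTION == 512 base pages.
 */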

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			__section_mark_present(ms, section);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT);
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}
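
/*
 * Worked example of the identity above (illustrative): for a pfn that
 * lies inside section pnum, decoding gives
 *
 *	(struct page *)coded_mem_map + pfn
 *		== mem_map - section_nr_to_pfn(pnum) + pfn
 *		== &mem_map[pfn - section_nr_to_pfn(pnum)]
 *
 * i.e. the struct page backing that pfn. This is what
 * __section_mem_map_addr() and pfn_to_page() rely on.
 */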

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}
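
/*
 * Rough sizing (illustrative, configuration dependent): the usemap
 * stores NR_PAGEBLOCK_BITS of flags per pageblock. On x86-64 with
 * 128 MiB sections and pageblock_order == 9, a section holds 64
 * pageblocks, so usemap_size() works out to 64 * 4 bits == 32 bytes
 * appended to each struct mem_section_usage.
 */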

#ifdef CONFIG_MEMORY_HOTREMOVE
static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
{
#ifndef CONFIG_NUMA
	VM_BUG_ON(pgdat != &contig_page_data);
	return __pa_symbol(&contig_page_data);
#else
	return __pa(pgdat);
#endif
}

static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memmap_alloc(size, size, addr, nid, false);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free(sparsemap_buf, size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
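
/*
 * Note on the bump allocator above (illustrative commentary): callers
 * pass the chunk size they need, and rounding the cursor up to a
 * multiple of that size gives each chunk natural alignment - a
 * PMD_SIZE-sized memmap chunk comes back PMD-aligned, which is what
 * lets the vmemmap code back it with huge-page mappings. The bytes
 * skipped over by the roundup are handed back to memblock via
 * sparse_buffer_free() rather than leaked.
 */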

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = page->index;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state. This means all pages are
		 * isolated from the page allocator. If the removing section's
		 * memmap is placed on the same section, it must not be freed.
		 * If it is freed, the page allocator may allocate it, and it
		 * will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing
 * the usage map, but still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: alternate pfns to allocate the memmap backing store
 * @pgmap: alternate compound page geometry for devmap mappings
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */