1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/arch/x86_64/mm/init.c |
4 | * |
5 | * Copyright (C) 1995 Linus Torvalds |
6 | * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz> |
7 | * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de> |
8 | */ |
9 | |
10 | #include <linux/signal.h> |
11 | #include <linux/sched.h> |
12 | #include <linux/kernel.h> |
13 | #include <linux/errno.h> |
14 | #include <linux/string.h> |
15 | #include <linux/types.h> |
16 | #include <linux/ptrace.h> |
17 | #include <linux/mman.h> |
18 | #include <linux/mm.h> |
19 | #include <linux/swap.h> |
20 | #include <linux/smp.h> |
21 | #include <linux/init.h> |
22 | #include <linux/initrd.h> |
23 | #include <linux/pagemap.h> |
24 | #include <linux/memblock.h> |
25 | #include <linux/proc_fs.h> |
26 | #include <linux/pci.h> |
27 | #include <linux/pfn.h> |
28 | #include <linux/poison.h> |
29 | #include <linux/dma-mapping.h> |
30 | #include <linux/memory.h> |
31 | #include <linux/memory_hotplug.h> |
32 | #include <linux/memremap.h> |
33 | #include <linux/nmi.h> |
34 | #include <linux/gfp.h> |
35 | #include <linux/kcore.h> |
36 | #include <linux/bootmem_info.h> |
37 | |
38 | #include <asm/processor.h> |
39 | #include <asm/bios_ebda.h> |
40 | #include <linux/uaccess.h> |
41 | #include <asm/pgalloc.h> |
42 | #include <asm/dma.h> |
43 | #include <asm/fixmap.h> |
44 | #include <asm/e820/api.h> |
45 | #include <asm/apic.h> |
46 | #include <asm/tlb.h> |
47 | #include <asm/mmu_context.h> |
48 | #include <asm/proto.h> |
49 | #include <asm/smp.h> |
50 | #include <asm/sections.h> |
51 | #include <asm/kdebug.h> |
52 | #include <asm/numa.h> |
53 | #include <asm/set_memory.h> |
54 | #include <asm/init.h> |
55 | #include <asm/uv/uv.h> |
56 | #include <asm/setup.h> |
57 | #include <asm/ftrace.h> |
58 | |
59 | #include "mm_internal.h" |
60 | |
61 | #include "ident_map.c" |
62 | |
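/*
 * The *_init() helpers generated below pick between the plain and the _safe
 * populate variants: while the initial direct mapping is being built
 * (init == true) the _safe variants are used, which sanity-check that an
 * already-present entry is not silently overwritten.
 */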
63 | #define DEFINE_POPULATE(fname, type1, type2, init) \ |
64 | static inline void fname##_init(struct mm_struct *mm, \ |
65 | type1##_t *arg1, type2##_t *arg2, bool init) \ |
66 | { \ |
67 | if (init) \ |
68 | fname##_safe(mm, arg1, arg2); \ |
69 | else \ |
70 | fname(mm, arg1, arg2); \ |
71 | } |
72 | |
73 | DEFINE_POPULATE(p4d_populate, p4d, pud, init) |
74 | DEFINE_POPULATE(pgd_populate, pgd, p4d, init) |
75 | DEFINE_POPULATE(pud_populate, pud, pmd, init) |
76 | DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init) |
77 | |
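/* Same idea for the raw entry setters: set_{p4d,pud,pmd,pte}_init(). */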
78 | #define DEFINE_ENTRY(type1, type2, init) \ |
79 | static inline void set_##type1##_init(type1##_t *arg1, \ |
80 | type2##_t arg2, bool init) \ |
81 | { \ |
82 | if (init) \ |
83 | set_##type1##_safe(arg1, arg2); \ |
84 | else \ |
85 | set_##type1(arg1, arg2); \ |
86 | } |
87 | |
88 | DEFINE_ENTRY(p4d, p4d, init) |
89 | DEFINE_ENTRY(pud, pud, init) |
90 | DEFINE_ENTRY(pmd, pmd, init) |
91 | DEFINE_ENTRY(pte, pte, init) |
92 | |
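/*
 * Upgrade a protection value to a huge-page one by setting _PAGE_PSE.
 * Bit 7 means PAT in a 4k PTE but PSE in PMD/PUD entries, so a PAT-based
 * protection must not be passed in here.
 */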
93 | static inline pgprot_t prot_sethuge(pgprot_t prot) |
94 | { |
95 | WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT); |
96 | |
97 | return __pgprot(pgprot_val(prot) | _PAGE_PSE); |
98 | } |
99 | |
100 | /* |
101 | * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously |
102 | * in physical memory, so we can cache the location of the first one and |
103 | * move around without checking the pgd every time. |
104 | */ |
105 | |
106 | /* Bits supported by the hardware: */ |
107 | pteval_t __supported_pte_mask __read_mostly = ~0; |
108 | /* Bits allowed in normal kernel mappings: */ |
109 | pteval_t __default_kernel_pte_mask __read_mostly = ~0; |
110 | EXPORT_SYMBOL_GPL(__supported_pte_mask); |
111 | /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ |
112 | EXPORT_SYMBOL(__default_kernel_pte_mask); |
113 | |
114 | int force_personality32; |
115 | |
116 | /* |
117 | * noexec32=on|off |
118 | * Control the non-executable heap for 32-bit processes. |
119 | * |
120 | * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) |
121 | * off PROT_READ implies PROT_EXEC |
122 | */ |
123 | static int __init nonx32_setup(char *str) |
124 | { |
125 | if (!strcmp(str, "on" )) |
126 | force_personality32 &= ~READ_IMPLIES_EXEC; |
127 | else if (!strcmp(str, "off" )) |
128 | force_personality32 |= READ_IMPLIES_EXEC; |
129 | return 1; |
130 | } |
131 | __setup("noexec32=" , nonx32_setup); |
132 | |
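/*
 * With 5-level paging the PGD is the level shared by all page tables:
 * copy any populated kernel PGD entries in [start, end] into every pgd
 * on pgd_list so that all mms see the new mappings.
 */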
133 | static void sync_global_pgds_l5(unsigned long start, unsigned long end) |
134 | { |
135 | unsigned long addr; |
136 | |
137 | for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { |
138 | const pgd_t *pgd_ref = pgd_offset_k(addr); |
139 | struct page *page; |
140 | |
141 | /* Check for overflow */ |
142 | if (addr < start) |
143 | break; |
144 | |
145 | if (pgd_none(*pgd_ref)) |
146 | continue; |
147 | |
148 | spin_lock(&pgd_lock); |
149 | list_for_each_entry(page, &pgd_list, lru) { |
150 | pgd_t *pgd; |
151 | spinlock_t *pgt_lock; |
152 | |
153 | pgd = (pgd_t *)page_address(page) + pgd_index(addr); |
154 | /* the pgt_lock only for Xen */ |
155 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
156 | spin_lock(pgt_lock); |
157 | |
158 | if (!pgd_none(*pgd_ref) && !pgd_none(*pgd)) |
159 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); |
160 | |
161 | if (pgd_none(*pgd)) |
162 | set_pgd(pgd, *pgd_ref); |
163 | |
164 | spin_unlock(pgt_lock); |
165 | } |
166 | spin_unlock(&pgd_lock); |
167 | } |
168 | } |
169 | |
170 | static void sync_global_pgds_l4(unsigned long start, unsigned long end) |
171 | { |
172 | unsigned long addr; |
173 | |
174 | for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { |
175 | pgd_t *pgd_ref = pgd_offset_k(addr); |
176 | const p4d_t *p4d_ref; |
177 | struct page *page; |
178 | |
179 | /* |
180 | * With folded p4d, pgd_none() is always false, we need to |
181 | * handle synchronization on p4d level. |
182 | */ |
183 | MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref)); |
184 | p4d_ref = p4d_offset(pgd_ref, addr); |
185 | |
186 | if (p4d_none(*p4d_ref)) |
187 | continue; |
188 | |
189 | spin_lock(&pgd_lock); |
190 | list_for_each_entry(page, &pgd_list, lru) { |
191 | pgd_t *pgd; |
192 | p4d_t *p4d; |
193 | spinlock_t *pgt_lock; |
194 | |
195 | pgd = (pgd_t *)page_address(page) + pgd_index(addr); |
196 | p4d = p4d_offset(pgd, addr); |
197 | /* the pgt_lock only for Xen */ |
198 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
199 | spin_lock(pgt_lock); |
200 | |
201 | if (!p4d_none(*p4d_ref) && !p4d_none(*p4d)) |
202 | BUG_ON(p4d_pgtable(*p4d) |
203 | != p4d_pgtable(*p4d_ref)); |
204 | |
205 | if (p4d_none(*p4d)) |
206 | set_p4d(p4d, *p4d_ref); |
207 | |
208 | spin_unlock(pgt_lock); |
209 | } |
210 | spin_unlock(&pgd_lock); |
211 | } |
212 | } |
213 | |
214 | /* |
215 | * When memory is added, make sure all the processes' MMs have |
216 | * suitable PGD entries in the local PGD level page. |
217 | */ |
218 | static void sync_global_pgds(unsigned long start, unsigned long end) |
219 | { |
220 | if (pgtable_l5_enabled()) |
221 | sync_global_pgds_l5(start, end); |
222 | else |
223 | sync_global_pgds_l4(start, end); |
224 | } |
225 | |
226 | /* |
227 | * NOTE: This function is marked __ref because it calls the boot-time |
228 | * allocator (memblock_alloc()). Doing so is safe ONLY while after_bootmem == 0. |
229 | */ |
230 | static __ref void *spp_getpage(void) |
231 | { |
232 | void *ptr; |
233 | |
234 | if (after_bootmem) |
235 | ptr = (void *) get_zeroed_page(GFP_ATOMIC); |
236 | else |
237 | ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
238 | |
239 | if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) { |
240 | panic("set_pte_phys: cannot allocate page data %s\n", |
241 | after_bootmem ? "after bootmem" : ""); |
242 | } |
243 | |
244 | pr_debug("spp_getpage %p\n" , ptr); |
245 | |
246 | return ptr; |
247 | } |
248 | |
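/*
 * The fill_*() helpers below walk one level of the kernel page table,
 * allocating the next-level table via spp_getpage() if the entry is
 * empty, and return the entry corresponding to vaddr.
 */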
249 | static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr) |
250 | { |
251 | if (pgd_none(*pgd)) { |
252 | p4d_t *p4d = (p4d_t *)spp_getpage(); |
253 | pgd_populate(&init_mm, pgd, p4d); |
254 | if (p4d != p4d_offset(pgd, 0)) |
255 | printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n" , |
256 | p4d, p4d_offset(pgd, 0)); |
257 | } |
258 | return p4d_offset(pgd, vaddr); |
259 | } |
260 | |
261 | static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr) |
262 | { |
263 | if (p4d_none(*p4d)) { |
264 | pud_t *pud = (pud_t *)spp_getpage(); |
265 | p4d_populate(&init_mm, p4d, pud); |
266 | if (pud != pud_offset(p4d, 0)) |
267 | printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n" , |
268 | pud, pud_offset(p4d, 0)); |
269 | } |
270 | return pud_offset(p4d, vaddr); |
271 | } |
272 | |
273 | static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) |
274 | { |
275 | if (pud_none(*pud)) { |
276 | pmd_t *pmd = (pmd_t *) spp_getpage(); |
277 | pud_populate(&init_mm, pud, pmd); |
278 | if (pmd != pmd_offset(pud, 0)) |
279 | printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n" , |
280 | pmd, pmd_offset(pud, 0)); |
281 | } |
282 | return pmd_offset(pud, vaddr); |
283 | } |
284 | |
285 | static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) |
286 | { |
287 | if (pmd_none(*pmd)) { |
288 | pte_t *pte = (pte_t *) spp_getpage(); |
289 | pmd_populate_kernel(&init_mm, pmd, pte); |
290 | if (pte != pte_offset_kernel(pmd, 0)) |
291 | printk(KERN_ERR "PAGETABLE BUG #03!\n" ); |
292 | } |
293 | return pte_offset_kernel(pmd, vaddr); |
294 | } |
295 | |
296 | static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) |
297 | { |
298 | pmd_t *pmd = fill_pmd(pud, vaddr); |
299 | pte_t *pte = fill_pte(pmd, vaddr); |
300 | |
301 | set_pte(pte, new_pte); |
302 | |
303 | /* |
304 | * It's enough to flush this one mapping. |
305 | * (PGE mappings get flushed as well) |
306 | */ |
307 | flush_tlb_one_kernel(vaddr); |
308 | } |
309 | |
310 | void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) |
311 | { |
312 | p4d_t *p4d = p4d_page + p4d_index(vaddr); |
313 | pud_t *pud = fill_pud(p4d, vaddr); |
314 | |
315 | __set_pte_vaddr(pud, vaddr, new_pte); |
316 | } |
317 | |
318 | void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) |
319 | { |
320 | pud_t *pud = pud_page + pud_index(vaddr); |
321 | |
322 | __set_pte_vaddr(pud, vaddr, new_pte); |
323 | } |
324 | |
325 | void set_pte_vaddr(unsigned long vaddr, pte_t pteval) |
326 | { |
327 | pgd_t *pgd; |
328 | p4d_t *p4d_page; |
329 | |
330 | pr_debug("set_pte_vaddr %lx to %lx\n" , vaddr, native_pte_val(pteval)); |
331 | |
332 | pgd = pgd_offset_k(vaddr); |
333 | if (pgd_none(*pgd)) { |
334 | printk(KERN_ERR |
335 | "PGD FIXMAP MISSING, it should be setup in head.S!\n" ); |
336 | return; |
337 | } |
338 | |
339 | p4d_page = p4d_offset(pgd, 0); |
340 | set_pte_vaddr_p4d(p4d_page, vaddr, pteval); |
341 | } |
342 | |
343 | pmd_t * __init populate_extra_pmd(unsigned long vaddr) |
344 | { |
345 | pgd_t *pgd; |
346 | p4d_t *p4d; |
347 | pud_t *pud; |
348 | |
349 | pgd = pgd_offset_k(vaddr); |
350 | p4d = fill_p4d(pgd, vaddr); |
351 | pud = fill_pud(p4d, vaddr); |
352 | return fill_pmd(pud, vaddr); |
353 | } |
354 | |
355 | pte_t * __init populate_extra_pte(unsigned long vaddr) |
356 | { |
357 | pmd_t *pmd; |
358 | |
359 | pmd = populate_extra_pmd(vaddr); |
360 | return fill_pte(pmd, vaddr); |
361 | } |
362 | |
363 | /* |
364 | * Create large page table mappings for a range of physical addresses. |
365 | */ |
366 | static void __init __init_extra_mapping(unsigned long phys, unsigned long size, |
367 | enum page_cache_mode cache) |
368 | { |
369 | pgd_t *pgd; |
370 | p4d_t *p4d; |
371 | pud_t *pud; |
372 | pmd_t *pmd; |
373 | pgprot_t prot; |
374 | |
375 | pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) | |
376 | protval_4k_2_large(cachemode2protval(cache)); |
377 | BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); |
378 | for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { |
379 | pgd = pgd_offset_k((unsigned long)__va(phys)); |
380 | if (pgd_none(*pgd)) { |
381 | p4d = (p4d_t *) spp_getpage(); |
382 | set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE | |
383 | _PAGE_USER)); |
384 | } |
385 | p4d = p4d_offset(pgd, (unsigned long)__va(phys)); |
386 | if (p4d_none(*p4d)) { |
387 | pud = (pud_t *) spp_getpage(); |
388 | set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE | |
389 | _PAGE_USER)); |
390 | } |
391 | pud = pud_offset(p4d, (unsigned long)__va(phys)); |
392 | if (pud_none(*pud)) { |
393 | pmd = (pmd_t *) spp_getpage(); |
394 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | |
395 | _PAGE_USER)); |
396 | } |
397 | pmd = pmd_offset(pud, phys); |
398 | BUG_ON(!pmd_none(*pmd)); |
399 | set_pmd(pmd, __pmd(phys | pgprot_val(prot))); |
400 | } |
401 | } |
402 | |
403 | void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) |
404 | { |
405 | __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB); |
406 | } |
407 | |
408 | void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) |
409 | { |
410 | __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC); |
411 | } |
412 | |
413 | /* |
414 | * The head.S code sets up the kernel high mapping: |
415 | * |
416 | * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text) |
417 | * |
418 | * phys_base holds the negative offset to the kernel, which is added |
419 | * to the compile time generated pmds. This results in invalid pmds up |
420 | * to the point where we hit the physaddr 0 mapping. |
421 | * |
422 | * We limit the mappings to the region from _text to _brk_end. _brk_end |
423 | * is rounded up to the 2MB boundary. This catches the invalid pmds as |
424 | * well, as they are located before _text: |
425 | */ |
426 | void __init cleanup_highmap(void) |
427 | { |
428 | unsigned long vaddr = __START_KERNEL_map; |
429 | unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE; |
430 | unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; |
431 | pmd_t *pmd = level2_kernel_pgt; |
432 | |
433 | /* |
434 | * Native path, max_pfn_mapped is not set yet. |
435 | * Xen has valid max_pfn_mapped set in |
436 | * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable(). |
437 | */ |
438 | if (max_pfn_mapped) |
439 | vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); |
440 | |
441 | for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { |
442 | if (pmd_none(*pmd)) |
443 | continue; |
444 | if (vaddr < (unsigned long) _text || vaddr > end) |
445 | set_pmd(pmd, __pmd(0)); |
446 | } |
447 | } |
448 | |
449 | /* |
450 | * Create PTE level page table mapping for physical addresses. |
451 | * It returns the last physical address mapped. |
452 | */ |
453 | static unsigned long __meminit |
454 | phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, |
455 | pgprot_t prot, bool init) |
456 | { |
457 | unsigned long pages = 0, paddr_next; |
458 | unsigned long paddr_last = paddr_end; |
459 | pte_t *pte; |
460 | int i; |
461 | |
462 | pte = pte_page + pte_index(paddr); |
463 | i = pte_index(paddr); |
464 | |
465 | for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) { |
466 | paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE; |
467 | if (paddr >= paddr_end) { |
468 | if (!after_bootmem && |
469 | !e820__mapped_any(paddr & PAGE_MASK, paddr_next, |
470 | E820_TYPE_RAM) && |
471 | !e820__mapped_any(paddr & PAGE_MASK, paddr_next, |
472 | E820_TYPE_RESERVED_KERN)) |
473 | set_pte_init(pte, __pte(0), init); |
474 | continue; |
475 | } |
476 | |
477 | /* |
478 | * We will re-use the existing mapping. |
479 | * Xen for example has some special requirements, like mapping |
480 | * pagetable pages as RO. So assume whoever pre-set up |
481 | * these mappings knew what they were doing. |
482 | */ |
483 | if (!pte_none(*pte)) { |
484 | if (!after_bootmem) |
485 | pages++; |
486 | continue; |
487 | } |
488 | |
489 | if (0) |
490 | pr_info(" pte=%p addr=%lx pte=%016lx\n" , pte, paddr, |
491 | pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte); |
492 | pages++; |
493 | set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init); |
494 | paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE; |
495 | } |
496 | |
497 | update_page_count(PG_LEVEL_4K, pages); |
498 | |
499 | return paddr_last; |
500 | } |
501 | |
502 | /* |
503 | * Create PMD level page table mapping for physical addresses. The virtual |
504 | * and physical addresses have to be aligned at this level. |
505 | * It returns the last physical address mapped. |
506 | */ |
507 | static unsigned long __meminit |
508 | phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, |
509 | unsigned long page_size_mask, pgprot_t prot, bool init) |
510 | { |
511 | unsigned long pages = 0, paddr_next; |
512 | unsigned long paddr_last = paddr_end; |
513 | |
514 | int i = pmd_index(paddr); |
515 | |
516 | for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) { |
517 | pmd_t *pmd = pmd_page + pmd_index(paddr); |
518 | pte_t *pte; |
519 | pgprot_t new_prot = prot; |
520 | |
521 | paddr_next = (paddr & PMD_MASK) + PMD_SIZE; |
522 | if (paddr >= paddr_end) { |
523 | if (!after_bootmem && |
524 | !e820__mapped_any(paddr & PMD_MASK, paddr_next, |
525 | E820_TYPE_RAM) && |
526 | !e820__mapped_any(paddr & PMD_MASK, paddr_next, |
527 | E820_TYPE_RESERVED_KERN)) |
528 | set_pmd_init(pmd, __pmd(0), init); |
529 | continue; |
530 | } |
531 | |
532 | if (!pmd_none(*pmd)) { |
533 | if (!pmd_leaf(*pmd)) { |
534 | spin_lock(&init_mm.page_table_lock); |
535 | pte = (pte_t *)pmd_page_vaddr(*pmd); |
536 | paddr_last = phys_pte_init(pte, paddr, |
537 | paddr_end, prot, |
538 | init); |
539 | spin_unlock(&init_mm.page_table_lock); |
540 | continue; |
541 | } |
542 | /* |
543 | * If we are ok with PG_LEVEL_2M mapping, then we will |
544 | * use the existing mapping, |
545 | * |
546 | * Otherwise, we will split the large page mapping but |
547 | * use the same existing protection bits except for |
548 | * large page, so that we don't violate Intel's TLB |
549 | * Application note (317080) which says, while changing |
550 | * the page sizes, new and old translations should |
551 | * not differ with respect to page frame and |
552 | * attributes. |
553 | */ |
554 | if (page_size_mask & (1 << PG_LEVEL_2M)) { |
555 | if (!after_bootmem) |
556 | pages++; |
557 | paddr_last = paddr_next; |
558 | continue; |
559 | } |
560 | new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); |
561 | } |
562 | |
563 | if (page_size_mask & (1<<PG_LEVEL_2M)) { |
564 | pages++; |
565 | spin_lock(&init_mm.page_table_lock); |
566 | set_pmd_init(pmd, |
567 | pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)), |
568 | init); |
569 | spin_unlock(&init_mm.page_table_lock); |
570 | paddr_last = paddr_next; |
571 | continue; |
572 | } |
573 | |
574 | pte = alloc_low_page(); |
575 | paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init); |
576 | |
577 | spin_lock(&init_mm.page_table_lock); |
578 | pmd_populate_kernel_init(&init_mm, pmd, pte, init); |
579 | spin_unlock(&init_mm.page_table_lock); |
580 | } |
581 | update_page_count(PG_LEVEL_2M, pages); |
582 | return paddr_last; |
583 | } |
584 | |
585 | /* |
586 | * Create PUD level page table mapping for physical addresses. The virtual |
587 | * and physical addresses do not have to be aligned at this level. KASLR can |
588 | * randomize virtual addresses up to this level. |
589 | * It returns the last physical address mapped. |
590 | */ |
591 | static unsigned long __meminit |
592 | phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, |
593 | unsigned long page_size_mask, pgprot_t _prot, bool init) |
594 | { |
595 | unsigned long pages = 0, paddr_next; |
596 | unsigned long paddr_last = paddr_end; |
597 | unsigned long vaddr = (unsigned long)__va(paddr); |
598 | int i = pud_index(vaddr); |
599 | |
600 | for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) { |
601 | pud_t *pud; |
602 | pmd_t *pmd; |
603 | pgprot_t prot = _prot; |
604 | |
605 | vaddr = (unsigned long)__va(paddr); |
606 | pud = pud_page + pud_index(vaddr); |
607 | paddr_next = (paddr & PUD_MASK) + PUD_SIZE; |
608 | |
609 | if (paddr >= paddr_end) { |
610 | if (!after_bootmem && |
611 | !e820__mapped_any(paddr & PUD_MASK, paddr_next, |
612 | E820_TYPE_RAM) && |
613 | !e820__mapped_any(paddr & PUD_MASK, paddr_next, |
614 | E820_TYPE_RESERVED_KERN)) |
615 | set_pud_init(pud, __pud(0), init); |
616 | continue; |
617 | } |
618 | |
619 | if (!pud_none(*pud)) { |
620 | if (!pud_leaf(*pud)) { |
621 | pmd = pmd_offset(pud, 0); |
622 | paddr_last = phys_pmd_init(pmd, paddr, |
623 | paddr_end, |
624 | page_size_mask, |
625 | prot, init); |
626 | continue; |
627 | } |
628 | /* |
629 | * If we are ok with PG_LEVEL_1G mapping, then we will |
630 | * use the existing mapping. |
631 | * |
632 | * Otherwise, we will split the gbpage mapping but use |
633 | * the same existing protection bits except for large |
634 | * page, so that we don't violate Intel's TLB |
635 | * Application note (317080) which says, while changing |
636 | * the page sizes, new and old translations should |
637 | * not differ with respect to page frame and |
638 | * attributes. |
639 | */ |
640 | if (page_size_mask & (1 << PG_LEVEL_1G)) { |
641 | if (!after_bootmem) |
642 | pages++; |
643 | paddr_last = paddr_next; |
644 | continue; |
645 | } |
646 | prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); |
647 | } |
648 | |
649 | if (page_size_mask & (1<<PG_LEVEL_1G)) { |
650 | pages++; |
651 | spin_lock(&init_mm.page_table_lock); |
652 | set_pud_init(pud, |
653 | pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)), |
654 | init); |
655 | spin_unlock(&init_mm.page_table_lock); |
656 | paddr_last = paddr_next; |
657 | continue; |
658 | } |
659 | |
660 | pmd = alloc_low_page(); |
661 | paddr_last = phys_pmd_init(pmd, paddr, paddr_end, |
662 | page_size_mask, prot, init); |
663 | |
664 | spin_lock(&init_mm.page_table_lock); |
665 | pud_populate_init(&init_mm, pud, pmd, init); |
666 | spin_unlock(&init_mm.page_table_lock); |
667 | } |
668 | |
669 | update_page_count(PG_LEVEL_1G, pages); |
670 | |
671 | return paddr_last; |
672 | } |
673 | |
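/*
 * Create P4D level page table mapping for physical addresses. With 4-level
 * paging the P4D is folded and this falls through to phys_pud_init().
 * It returns the last physical address mapped.
 */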
674 | static unsigned long __meminit |
675 | phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, |
676 | unsigned long page_size_mask, pgprot_t prot, bool init) |
677 | { |
678 | unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last; |
679 | |
680 | paddr_last = paddr_end; |
681 | vaddr = (unsigned long)__va(paddr); |
682 | vaddr_end = (unsigned long)__va(paddr_end); |
683 | |
684 | if (!pgtable_l5_enabled()) |
685 | return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, |
686 | page_size_mask, prot, init); |
687 | |
688 | for (; vaddr < vaddr_end; vaddr = vaddr_next) { |
689 | p4d_t *p4d = p4d_page + p4d_index(vaddr); |
690 | pud_t *pud; |
691 | |
692 | vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE; |
693 | paddr = __pa(vaddr); |
694 | |
695 | if (paddr >= paddr_end) { |
696 | paddr_next = __pa(vaddr_next); |
697 | if (!after_bootmem && |
698 | !e820__mapped_any(paddr & P4D_MASK, paddr_next, |
699 | E820_TYPE_RAM) && |
700 | !e820__mapped_any(paddr & P4D_MASK, paddr_next, |
701 | E820_TYPE_RESERVED_KERN)) |
702 | set_p4d_init(p4d, __p4d(0), init); |
703 | continue; |
704 | } |
705 | |
706 | if (!p4d_none(*p4d)) { |
707 | pud = pud_offset(p4d, 0); |
708 | paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end), |
709 | page_size_mask, prot, init); |
710 | continue; |
711 | } |
712 | |
713 | pud = alloc_low_page(); |
714 | paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end), |
715 | page_size_mask, prot, init); |
716 | |
717 | spin_lock(&init_mm.page_table_lock); |
718 | p4d_populate_init(&init_mm, p4d, pud, init); |
719 | spin_unlock(&init_mm.page_table_lock); |
720 | } |
721 | |
722 | return paddr_last; |
723 | } |
724 | |
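/*
 * Walk the top level and build the direct mapping for the physical range
 * [paddr_start, paddr_end), then propagate any new PGD entries to all
 * other page tables via sync_global_pgds().
 */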
725 | static unsigned long __meminit |
726 | __kernel_physical_mapping_init(unsigned long paddr_start, |
727 | unsigned long paddr_end, |
728 | unsigned long page_size_mask, |
729 | pgprot_t prot, bool init) |
730 | { |
731 | bool pgd_changed = false; |
732 | unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last; |
733 | |
734 | paddr_last = paddr_end; |
735 | vaddr = (unsigned long)__va(paddr_start); |
736 | vaddr_end = (unsigned long)__va(paddr_end); |
737 | vaddr_start = vaddr; |
738 | |
739 | for (; vaddr < vaddr_end; vaddr = vaddr_next) { |
740 | pgd_t *pgd = pgd_offset_k(vaddr); |
741 | p4d_t *p4d; |
742 | |
743 | vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE; |
744 | |
745 | if (pgd_val(*pgd)) { |
746 | p4d = (p4d_t *)pgd_page_vaddr(*pgd); |
747 | paddr_last = phys_p4d_init(p4d, __pa(vaddr), |
748 | __pa(vaddr_end), |
749 | page_size_mask, |
750 | prot, init); |
751 | continue; |
752 | } |
753 | |
754 | p4d = alloc_low_page(); |
755 | paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end), |
756 | page_size_mask, prot, init); |
757 | |
758 | spin_lock(&init_mm.page_table_lock); |
759 | if (pgtable_l5_enabled()) |
760 | pgd_populate_init(&init_mm, pgd, p4d, init); |
761 | else |
762 | p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr), |
763 | (pud_t *) p4d, init); |
764 | |
765 | spin_unlock(&init_mm.page_table_lock); |
766 | pgd_changed = true; |
767 | } |
768 | |
769 | if (pgd_changed) |
770 | sync_global_pgds(vaddr_start, vaddr_end - 1); |
771 | |
772 | return paddr_last; |
773 | } |
774 | |
775 | |
776 | /* |
777 | * Create page table mapping for the physical memory for specific physical |
778 | * addresses. Note that it can only be used to populate non-present entries. |
779 | * The virtual and physical addresses have to be aligned on PMD level |
780 | * down. It returns the last physical address mapped. |
781 | */ |
782 | unsigned long __meminit |
783 | kernel_physical_mapping_init(unsigned long paddr_start, |
784 | unsigned long paddr_end, |
785 | unsigned long page_size_mask, pgprot_t prot) |
786 | { |
787 | return __kernel_physical_mapping_init(paddr_start, paddr_end, |
788 | page_size_mask, prot, true); |
789 | } |
790 | |
791 | /* |
792 | * This function is similar to kernel_physical_mapping_init() above with the |
793 | * exception that it uses set_{pud,pmd}() instead of the set_{pud,pte}_safe() |
794 | * when updating the mapping. The caller is responsible to flush the TLBs after |
795 | * the function returns. |
796 | */ |
797 | unsigned long __meminit |
798 | kernel_physical_mapping_change(unsigned long paddr_start, |
799 | unsigned long paddr_end, |
800 | unsigned long page_size_mask) |
801 | { |
802 | return __kernel_physical_mapping_init(paddr_start, paddr_end, |
803 | page_size_mask, PAGE_KERNEL, |
804 | false); |
805 | } |
806 | |
807 | #ifndef CONFIG_NUMA |
808 | void __init initmem_init(void) |
809 | { |
810 | memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); |
811 | } |
812 | #endif |
813 | |
814 | void __init paging_init(void) |
815 | { |
816 | sparse_init(); |
817 | |
818 | /* |
819 | * clear the default setting with node 0 |
820 | * note: don't use nodes_clear here, that is really clearing when |
821 | * numa support is not compiled in, and later node_set_state |
822 | * will not set it back. |
823 | */ |
824 | node_clear_state(0, N_MEMORY); |
825 | node_clear_state(0, N_NORMAL_MEMORY); |
826 | |
827 | zone_sizes_init(); |
828 | } |
829 | |
830 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
831 | #define PAGE_UNUSED 0xFD |
832 | |
833 | /* |
834 | * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges |
835 | * from unused_pmd_start to the next PMD_SIZE boundary. |
836 | */ |
837 | static unsigned long unused_pmd_start __meminitdata; |
838 | |
839 | static void __meminit vmemmap_flush_unused_pmd(void) |
840 | { |
841 | if (!unused_pmd_start) |
842 | return; |
843 | /* |
844 | * Clears (unused_pmd_start, PMD_END] |
845 | */ |
846 | memset((void *)unused_pmd_start, PAGE_UNUSED, |
847 | ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start); |
848 | unused_pmd_start = 0; |
849 | } |
850 | |
851 | #ifdef CONFIG_MEMORY_HOTPLUG |
852 | /* Returns true if the PMD is completely unused and thus it can be freed */ |
853 | static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end) |
854 | { |
855 | unsigned long start = ALIGN_DOWN(addr, PMD_SIZE); |
856 | |
857 | /* |
858 | * Flush the unused range cache to ensure that memchr_inv() will work |
859 | * for the whole range. |
860 | */ |
861 | vmemmap_flush_unused_pmd(); |
862 | memset((void *)addr, PAGE_UNUSED, end - addr); |
863 | |
864 | return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE); |
865 | } |
866 | #endif |
867 | |
868 | static void __meminit __vmemmap_use_sub_pmd(unsigned long start) |
869 | { |
870 | /* |
871 | * As we expect to add in the same granularity as we remove, it's |
872 | * sufficient to mark only some piece used to block the memmap page from |
873 | * getting removed when removing some other adjacent memmap (just in |
874 | * case the first memmap never gets initialized e.g., because the memory |
875 | * block never gets onlined). |
876 | */ |
877 | memset((void *)start, 0, sizeof(struct page)); |
878 | } |
879 | |
880 | static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end) |
881 | { |
882 | /* |
883 | * We only optimize if the new used range directly follows the |
884 | * previously unused range (esp., when populating consecutive sections). |
885 | */ |
886 | if (unused_pmd_start == start) { |
887 | if (likely(IS_ALIGNED(end, PMD_SIZE))) |
888 | unused_pmd_start = 0; |
889 | else |
890 | unused_pmd_start = end; |
891 | return; |
892 | } |
893 | |
894 | /* |
895 | * If the range does not contiguously follow the previous one, make sure |
896 | * to mark the unused range of the previous one so it can be removed. |
897 | */ |
898 | vmemmap_flush_unused_pmd(); |
899 | __vmemmap_use_sub_pmd(start); |
900 | } |
901 | |
902 | |
903 | static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) |
904 | { |
905 | const unsigned long page = ALIGN_DOWN(start, PMD_SIZE); |
906 | |
907 | vmemmap_flush_unused_pmd(); |
908 | |
909 | /* |
910 | * Could be our memmap page is filled with PAGE_UNUSED already from a |
911 | * previous remove. Make sure to reset it. |
912 | */ |
913 | __vmemmap_use_sub_pmd(start); |
914 | |
915 | /* |
916 | * Mark with PAGE_UNUSED the unused parts of the new memmap range |
917 | */ |
918 | if (!IS_ALIGNED(start, PMD_SIZE)) |
919 | memset((void *)page, PAGE_UNUSED, start - page); |
920 | |
921 | /* |
922 | * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of |
923 | * consecutive sections. Remember for the last added PMD where the |
924 | * unused range begins. |
925 | */ |
926 | if (!IS_ALIGNED(end, PMD_SIZE)) |
927 | unused_pmd_start = end; |
928 | } |
929 | #endif |
930 | |
931 | /* |
932 | * Memory hotplug specific functions |
933 | */ |
934 | #ifdef CONFIG_MEMORY_HOTPLUG |
935 | /* |
936 | * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need |
937 | * updating. |
938 | */ |
939 | static void update_end_of_memory_vars(u64 start, u64 size) |
940 | { |
941 | unsigned long end_pfn = PFN_UP(start + size); |
942 | |
943 | if (end_pfn > max_pfn) { |
944 | max_pfn = end_pfn; |
945 | max_low_pfn = end_pfn; |
946 | high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; |
947 | } |
948 | } |
949 | |
950 | int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, |
951 | struct mhp_params *params) |
952 | { |
953 | int ret; |
954 | |
955 | ret = __add_pages(nid, start_pfn, nr_pages, params); |
956 | WARN_ON_ONCE(ret); |
957 | |
958 | /* update max_pfn, max_low_pfn and high_memory */ |
959 | update_end_of_memory_vars(start_pfn << PAGE_SHIFT, |
960 | nr_pages << PAGE_SHIFT); |
961 | |
962 | return ret; |
963 | } |
964 | |
965 | int arch_add_memory(int nid, u64 start, u64 size, |
966 | struct mhp_params *params) |
967 | { |
968 | unsigned long start_pfn = start >> PAGE_SHIFT; |
969 | unsigned long nr_pages = size >> PAGE_SHIFT; |
970 | |
971 | init_memory_mapping(start, start + size, params->pgprot); |
972 | |
973 | return add_pages(nid, start_pfn, nr_pages, params); |
974 | } |
975 | |
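/*
 * Free a page-table (or vmemmap) page. Pages still carrying the bootmem
 * reserved flag were allocated early and are released accordingly;
 * everything else goes back to the page allocator.
 */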
976 | static void __meminit free_pagetable(struct page *page, int order) |
977 | { |
978 | unsigned long magic; |
979 | unsigned int nr_pages = 1 << order; |
980 | |
981 | /* bootmem page has reserved flag */ |
982 | if (PageReserved(page)) { |
983 | __ClearPageReserved(page); |
984 | |
985 | magic = page->index; |
986 | if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { |
987 | while (nr_pages--) |
988 | put_page_bootmem(page++); |
989 | } else |
990 | while (nr_pages--) |
991 | free_reserved_page(page++); |
992 | } else |
993 | free_pages((unsigned long)page_address(page), order); |
994 | } |
995 | |
996 | static void __meminit free_hugepage_table(struct page *page, |
997 | struct vmem_altmap *altmap) |
998 | { |
999 | if (altmap) |
1000 | vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE); |
1001 | else |
1002 | free_pagetable(page, get_order(PMD_SIZE)); |
1003 | } |
1004 | |
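/* Free the PTE page and clear the PMD pointing to it, but only if all PTEs in it are none. */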
1005 | static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) |
1006 | { |
1007 | pte_t *pte; |
1008 | int i; |
1009 | |
1010 | for (i = 0; i < PTRS_PER_PTE; i++) { |
1011 | pte = pte_start + i; |
1012 | if (!pte_none(*pte)) |
1013 | return; |
1014 | } |
1015 | |
1016 | /* free a pte table */ |
1017 | free_pagetable(pmd_page(*pmd), 0); |
1018 | spin_lock(&init_mm.page_table_lock); |
1019 | pmd_clear(pmd); |
1020 | spin_unlock(&init_mm.page_table_lock); |
1021 | } |
1022 | |
1023 | static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud) |
1024 | { |
1025 | pmd_t *pmd; |
1026 | int i; |
1027 | |
1028 | for (i = 0; i < PTRS_PER_PMD; i++) { |
1029 | pmd = pmd_start + i; |
1030 | if (!pmd_none(*pmd)) |
1031 | return; |
1032 | } |
1033 | |
1034 | /* free a pmd table */ |
1035 | free_pagetable(pud_page(*pud), 0); |
1036 | spin_lock(&init_mm.page_table_lock); |
1037 | pud_clear(pud); |
1038 | spin_unlock(&init_mm.page_table_lock); |
1039 | } |
1040 | |
1041 | static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d) |
1042 | { |
1043 | pud_t *pud; |
1044 | int i; |
1045 | |
1046 | for (i = 0; i < PTRS_PER_PUD; i++) { |
1047 | pud = pud_start + i; |
1048 | if (!pud_none(*pud)) |
1049 | return; |
1050 | } |
1051 | |
1052 | /* free a pud table */ |
1053 | free_pagetable(p4d_page(*p4d), 0); |
1054 | spin_lock(&init_mm.page_table_lock); |
1055 | p4d_clear(p4d); |
1056 | spin_unlock(&init_mm.page_table_lock); |
1057 | } |
1058 | |
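/*
 * The remove_*_table() helpers below tear down either part of the direct
 * mapping (direct == true) or a vmemmap range (direct == false). The backing
 * pages are freed only in the vmemmap case; page-table pages are freed once
 * they become completely empty.
 */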
1059 | static void __meminit |
1060 | remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, |
1061 | bool direct) |
1062 | { |
1063 | unsigned long next, pages = 0; |
1064 | pte_t *pte; |
1065 | phys_addr_t phys_addr; |
1066 | |
1067 | pte = pte_start + pte_index(addr); |
1068 | for (; addr < end; addr = next, pte++) { |
1069 | next = (addr + PAGE_SIZE) & PAGE_MASK; |
1070 | if (next > end) |
1071 | next = end; |
1072 | |
1073 | if (!pte_present(*pte)) |
1074 | continue; |
1075 | |
1076 | /* |
1077 | * We mapped [0,1G) memory as identity mapping when |
1078 | * initializing, in arch/x86/kernel/head_64.S. These |
1079 | * pagetables cannot be removed. |
1080 | */ |
1081 | phys_addr = pte_val(*pte) + (addr & PAGE_MASK); |
1082 | if (phys_addr < (phys_addr_t)0x40000000) |
1083 | return; |
1084 | |
1085 | if (!direct) |
1086 | free_pagetable(pte_page(*pte), 0); |
1087 | |
1088 | spin_lock(&init_mm.page_table_lock); |
1089 | pte_clear(&init_mm, addr, pte); |
1090 | spin_unlock(&init_mm.page_table_lock); |
1091 | |
1092 | /* For non-direct mapping, pages means nothing. */ |
1093 | pages++; |
1094 | } |
1095 | |
1096 | /* Call free_pte_table() in remove_pmd_table(). */ |
1097 | flush_tlb_all(); |
1098 | if (direct) |
1099 | update_page_count(PG_LEVEL_4K, -pages); |
1100 | } |
1101 | |
1102 | static void __meminit |
1103 | remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, |
1104 | bool direct, struct vmem_altmap *altmap) |
1105 | { |
1106 | unsigned long next, pages = 0; |
1107 | pte_t *pte_base; |
1108 | pmd_t *pmd; |
1109 | |
1110 | pmd = pmd_start + pmd_index(addr); |
1111 | for (; addr < end; addr = next, pmd++) { |
1112 | next = pmd_addr_end(addr, end); |
1113 | |
1114 | if (!pmd_present(*pmd)) |
1115 | continue; |
1116 | |
1117 | if (pmd_leaf(*pmd)) { |
1118 | if (IS_ALIGNED(addr, PMD_SIZE) && |
1119 | IS_ALIGNED(next, PMD_SIZE)) { |
1120 | if (!direct) |
1121 | free_hugepage_table(pmd_page(*pmd), |
1122 | altmap); |
1123 | |
1124 | spin_lock(&init_mm.page_table_lock); |
1125 | pmd_clear(pmd); |
1126 | spin_unlock(&init_mm.page_table_lock); |
1127 | pages++; |
1128 | } |
1129 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
1130 | else if (vmemmap_pmd_is_unused(addr, next)) { |
1131 | free_hugepage_table(pmd_page(*pmd), |
1132 | altmap); |
1133 | spin_lock(&init_mm.page_table_lock); |
1134 | pmd_clear(pmd); |
1135 | spin_unlock(&init_mm.page_table_lock); |
1136 | } |
1137 | #endif |
1138 | continue; |
1139 | } |
1140 | |
1141 | pte_base = (pte_t *)pmd_page_vaddr(*pmd); |
1142 | remove_pte_table(pte_base, addr, next, direct); |
1143 | free_pte_table(pte_base, pmd); |
1144 | } |
1145 | |
1146 | /* Call free_pmd_table() in remove_pud_table(). */ |
1147 | if (direct) |
1148 | update_page_count(PG_LEVEL_2M, -pages); |
1149 | } |
1150 | |
1151 | static void __meminit |
1152 | remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, |
1153 | struct vmem_altmap *altmap, bool direct) |
1154 | { |
1155 | unsigned long next, pages = 0; |
1156 | pmd_t *pmd_base; |
1157 | pud_t *pud; |
1158 | |
1159 | pud = pud_start + pud_index(addr); |
1160 | for (; addr < end; addr = next, pud++) { |
1161 | next = pud_addr_end(addr, end); |
1162 | |
1163 | if (!pud_present(*pud)) |
1164 | continue; |
1165 | |
1166 | if (pud_leaf(*pud) && |
1167 | IS_ALIGNED(addr, PUD_SIZE) && |
1168 | IS_ALIGNED(next, PUD_SIZE)) { |
1169 | spin_lock(&init_mm.page_table_lock); |
1170 | pud_clear(pud); |
1171 | spin_unlock(&init_mm.page_table_lock); |
1172 | pages++; |
1173 | continue; |
1174 | } |
1175 | |
1176 | pmd_base = pmd_offset(pud, 0); |
1177 | remove_pmd_table(pmd_base, addr, next, direct, altmap); |
1178 | free_pmd_table(pmd_base, pud); |
1179 | } |
1180 | |
1181 | if (direct) |
1182 | update_page_count(PG_LEVEL_1G, -pages); |
1183 | } |
1184 | |
1185 | static void __meminit |
1186 | remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end, |
1187 | struct vmem_altmap *altmap, bool direct) |
1188 | { |
1189 | unsigned long next, pages = 0; |
1190 | pud_t *pud_base; |
1191 | p4d_t *p4d; |
1192 | |
1193 | p4d = p4d_start + p4d_index(addr); |
1194 | for (; addr < end; addr = next, p4d++) { |
1195 | next = p4d_addr_end(addr, end); |
1196 | |
1197 | if (!p4d_present(*p4d)) |
1198 | continue; |
1199 | |
1200 | BUILD_BUG_ON(p4d_leaf(*p4d)); |
1201 | |
1202 | pud_base = pud_offset(p4d, 0); |
1203 | remove_pud_table(pud_base, addr, next, altmap, direct); |
1204 | /* |
1205 | * For 4-level page tables we do not want to free PUDs, but in the |
1206 | * 5-level case we should free them. This code will have to change |
1207 | * to adapt for boot-time switching between 4 and 5 level page tables. |
1208 | */ |
1209 | if (pgtable_l5_enabled()) |
1210 | free_pud_table(pud_base, p4d); |
1211 | } |
1212 | |
1213 | if (direct) |
1214 | update_page_count(PG_LEVEL_512G, -pages); |
1215 | } |
1216 | |
1217 | /* start and end are both virtual address. */ |
1218 | static void __meminit |
1219 | remove_pagetable(unsigned long start, unsigned long end, bool direct, |
1220 | struct vmem_altmap *altmap) |
1221 | { |
1222 | unsigned long next; |
1223 | unsigned long addr; |
1224 | pgd_t *pgd; |
1225 | p4d_t *p4d; |
1226 | |
1227 | for (addr = start; addr < end; addr = next) { |
1228 | next = pgd_addr_end(addr, end); |
1229 | |
1230 | pgd = pgd_offset_k(addr); |
1231 | if (!pgd_present(*pgd)) |
1232 | continue; |
1233 | |
1234 | p4d = p4d_offset(pgd, 0); |
1235 | remove_p4d_table(p4d, addr, next, altmap, direct); |
1236 | } |
1237 | |
1238 | flush_tlb_all(); |
1239 | } |
1240 | |
1241 | void __ref vmemmap_free(unsigned long start, unsigned long end, |
1242 | struct vmem_altmap *altmap) |
1243 | { |
1244 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
1245 | VM_BUG_ON(!PAGE_ALIGNED(end)); |
1246 | |
1247 | remove_pagetable(start, end, false, altmap); |
1248 | } |
1249 | |
1250 | static void __meminit |
1251 | kernel_physical_mapping_remove(unsigned long start, unsigned long end) |
1252 | { |
1253 | start = (unsigned long)__va(start); |
1254 | end = (unsigned long)__va(end); |
1255 | |
1256 | remove_pagetable(start, end, true, NULL); |
1257 | } |
1258 | |
1259 | void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) |
1260 | { |
1261 | unsigned long start_pfn = start >> PAGE_SHIFT; |
1262 | unsigned long nr_pages = size >> PAGE_SHIFT; |
1263 | |
1264 | __remove_pages(start_pfn, nr_pages, altmap); |
1265 | kernel_physical_mapping_remove(start, start + size); |
1266 | } |
1267 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
1268 | |
1269 | static struct kcore_list kcore_vsyscall; |
1270 | |
1271 | static void __init register_page_bootmem_info(void) |
1272 | { |
1273 | #if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) |
1274 | int i; |
1275 | |
1276 | for_each_online_node(i) |
1277 | register_page_bootmem_info_node(NODE_DATA(i)); |
1278 | #endif |
1279 | } |
1280 | |
1281 | /* |
1282 | * Pre-allocates page-table pages for the vmalloc area in the kernel page-table. |
1283 | * Only the level which needs to be synchronized between all page-tables is |
1284 | * allocated because the synchronization can be expensive. |
1285 | */ |
1286 | static void __init preallocate_vmalloc_pages(void) |
1287 | { |
1288 | unsigned long addr; |
1289 | const char *lvl; |
1290 | |
1291 | for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) { |
1292 | pgd_t *pgd = pgd_offset_k(addr); |
1293 | p4d_t *p4d; |
1294 | pud_t *pud; |
1295 | |
1296 | lvl = "p4d" ; |
1297 | p4d = p4d_alloc(&init_mm, pgd, addr); |
1298 | if (!p4d) |
1299 | goto failed; |
1300 | |
1301 | if (pgtable_l5_enabled()) |
1302 | continue; |
1303 | |
1304 | /* |
1305 | * The goal here is to allocate all possibly required |
1306 | * hardware page tables pointed to by the top hardware |
1307 | * level. |
1308 | * |
1309 | * On 4-level systems, the P4D layer is folded away and |
1310 | * the above code does no preallocation. Below, go down |
1311 | * to the pud _software_ level to ensure the second |
1312 | * hardware level is allocated on 4-level systems too. |
1313 | */ |
1314 | lvl = "pud" ; |
1315 | pud = pud_alloc(&init_mm, p4d, addr); |
1316 | if (!pud) |
1317 | goto failed; |
1318 | } |
1319 | |
1320 | return; |
1321 | |
1322 | failed: |
1323 | |
1324 | /* |
1325 | * The pages have to be there now or they will be missing in |
1326 | * process page-tables later. |
1327 | */ |
1328 | panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl); |
1329 | } |
1330 | |
1331 | void __init mem_init(void) |
1332 | { |
1333 | pci_iommu_alloc(); |
1334 | |
1335 | /* clear_bss() already clear the empty_zero_page */ |
1336 | |
1337 | /* this will put all memory onto the freelists */ |
1338 | memblock_free_all(); |
1339 | after_bootmem = 1; |
1340 | x86_init.hyper.init_after_bootmem(); |
1341 | |
1342 | /* |
1343 | * Must be done after boot memory is put on freelist, because here we |
1344 | * might set fields in deferred struct pages that have not yet been |
1345 | * initialized, and memblock_free_all() initializes all the reserved |
1346 | * deferred pages for us. |
1347 | */ |
1348 | register_page_bootmem_info(); |
1349 | |
1350 | /* Register memory areas for /proc/kcore */ |
1351 | if (get_gate_vma(&init_mm)) |
1352 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); |
1353 | |
1354 | preallocate_vmalloc_pages(); |
1355 | } |
1356 | |
1357 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
1358 | int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask) |
1359 | { |
1360 | /* |
1361 | * More CPUs always led to greater speedups on tested systems, up to |
1362 | * all the nodes' CPUs. Use all since the system is otherwise idle |
1363 | * now. |
1364 | */ |
1365 | return max_t(int, cpumask_weight(node_cpumask), 1); |
1366 | } |
1367 | #endif |
1368 | |
1369 | int kernel_set_to_readonly; |
1370 | |
1371 | void mark_rodata_ro(void) |
1372 | { |
1373 | unsigned long start = PFN_ALIGN(_text); |
1374 | unsigned long rodata_start = PFN_ALIGN(__start_rodata); |
1375 | unsigned long end = (unsigned long)__end_rodata_hpage_align; |
1376 | unsigned long text_end = PFN_ALIGN(_etext); |
1377 | unsigned long rodata_end = PFN_ALIGN(__end_rodata); |
1378 | unsigned long all_end; |
1379 | |
1380 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n" , |
1381 | (end - start) >> 10); |
1382 | set_memory_ro(start, (end - start) >> PAGE_SHIFT); |
1383 | |
1384 | kernel_set_to_readonly = 1; |
1385 | |
1386 | /* |
1387 | * The rodata/data/bss/brk section (but not the kernel text!) |
1388 | * should also be not-executable. |
1389 | * |
1390 | * We align all_end to PMD_SIZE because the existing mapping |
1391 | * is a full PMD. If we would align _brk_end to PAGE_SIZE we |
1392 | * split the PMD and the remainder between _brk_end and the end |
1393 | * of the PMD will remain mapped executable. |
1394 | * |
1395 | * Any PMD which was setup after the one which covers _brk_end |
1396 | * has been zapped already via cleanup_highmap(). |
1397 | */ |
1398 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); |
1399 | set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); |
1400 | |
1401 | set_ftrace_ops_ro(); |
1402 | |
1403 | #ifdef CONFIG_CPA_DEBUG |
1404 | printk(KERN_INFO "Testing CPA: undo %lx-%lx\n" , start, end); |
1405 | set_memory_rw(start, (end-start) >> PAGE_SHIFT); |
1406 | |
1407 | printk(KERN_INFO "Testing CPA: again\n" ); |
1408 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); |
1409 | #endif |
1410 | |
1411 | free_kernel_image_pages("unused kernel image (text/rodata gap)", |
1412 | (void *)text_end, (void *)rodata_start); |
1413 | free_kernel_image_pages("unused kernel image (rodata/data gap)", |
1414 | (void *)rodata_end, (void *)_sdata); |
1415 | } |
1416 | |
1417 | /* |
1418 | * Block size is the minimum amount of memory which can be hotplugged or |
1419 | * hotremoved. It must be a power of two and must be equal to or larger than |
1420 | * MIN_MEMORY_BLOCK_SIZE. |
1421 | */ |
1422 | #define MAX_BLOCK_SIZE (2UL << 30) |
1423 | |
1424 | /* Amount of ram needed to start using large blocks */ |
1425 | #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30) |
1426 | |
1427 | /* Adjustable memory block size */ |
1428 | static unsigned long set_memory_block_size; |
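/*
 * Allow early boot code to override the memory block size; the value must
 * be a power of two between MIN_MEMORY_BLOCK_SIZE and MEM_SIZE_FOR_LARGE_BLOCK.
 */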
1429 | int __init set_memory_block_size_order(unsigned int order) |
1430 | { |
1431 | unsigned long size = 1UL << order; |
1432 | |
1433 | if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE) |
1434 | return -EINVAL; |
1435 | |
1436 | set_memory_block_size = size; |
1437 | return 0; |
1438 | } |
1439 | |
1440 | static unsigned long probe_memory_block_size(void) |
1441 | { |
1442 | unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; |
1443 | unsigned long bz; |
1444 | |
1445 | /* If memory block size has been set, then use it */ |
1446 | bz = set_memory_block_size; |
1447 | if (bz) |
1448 | goto done; |
1449 | |
1450 | /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ |
1451 | if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { |
1452 | bz = MIN_MEMORY_BLOCK_SIZE; |
1453 | goto done; |
1454 | } |
1455 | |
1456 | /* |
1457 | * Use max block size to minimize overhead on bare metal, where |
1458 | * alignment for memory hotplug isn't a concern. |
1459 | */ |
1460 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
1461 | bz = MAX_BLOCK_SIZE; |
1462 | goto done; |
1463 | } |
1464 | |
1465 | /* Find the largest allowed block size that aligns to memory end */ |
1466 | for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) { |
1467 | if (IS_ALIGNED(boot_mem_end, bz)) |
1468 | break; |
1469 | } |
1470 | done: |
1471 | pr_info("x86/mm: Memory block size: %ldMB\n" , bz >> 20); |
1472 | |
1473 | return bz; |
1474 | } |
1475 | |
1476 | static unsigned long memory_block_size_probed; |
1477 | unsigned long memory_block_size_bytes(void) |
1478 | { |
1479 | if (!memory_block_size_probed) |
1480 | memory_block_size_probed = probe_memory_block_size(); |
1481 | |
1482 | return memory_block_size_probed; |
1483 | } |
1484 | |
1485 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
1486 | /* |
1487 | * Initialise the sparsemem vmemmap using huge-pages at the PMD level. |
1488 | */ |
1489 | static long __meminitdata addr_start, addr_end; |
1490 | static void __meminitdata *p_start, *p_end; |
1491 | static int __meminitdata node_start; |
1492 | |
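/*
 * Install a PMD-level (2M) vmemmap mapping for the struct page block at 'p'
 * and track the contiguous range for vmemmap_populate_print_last().
 */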
1493 | void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, |
1494 | unsigned long addr, unsigned long next) |
1495 | { |
1496 | pte_t entry; |
1497 | |
1498 | entry = pfn_pte(__pa(p) >> PAGE_SHIFT, |
1499 | PAGE_KERNEL_LARGE); |
1500 | set_pmd(pmd, __pmd(pte_val(entry))); |
1501 | |
1502 | /* check to see if we have contiguous blocks */ |
1503 | if (p_end != p || node_start != node) { |
1504 | if (p_start) |
1505 | pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n" , |
1506 | addr_start, addr_end-1, p_start, p_end-1, node_start); |
1507 | addr_start = addr; |
1508 | node_start = node; |
1509 | p_start = p; |
1510 | } |
1511 | |
1512 | addr_end = addr + PMD_SIZE; |
1513 | p_end = p + PMD_SIZE; |
1514 | |
1515 | if (!IS_ALIGNED(addr, PMD_SIZE) || |
1516 | !IS_ALIGNED(next, PMD_SIZE)) |
1517 | vmemmap_use_new_sub_pmd(addr, next); |
1518 | } |
1519 | |
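/*
 * Tell the generic vmemmap code whether this PMD already maps a huge page;
 * if so, verify it and account the sub-PMD usage instead of re-populating.
 */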
1520 | int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, |
1521 | unsigned long addr, unsigned long next) |
1522 | { |
1523 | int large = pmd_leaf(*pmd); |
1524 | |
1525 | if (pmd_leaf(*pmd)) { |
1526 | vmemmap_verify((pte_t *)pmd, node, addr, next); |
1527 | vmemmap_use_sub_pmd(addr, next); |
1528 | } |
1529 | |
1530 | return large; |
1531 | } |
1532 | |
1533 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, |
1534 | struct vmem_altmap *altmap) |
1535 | { |
1536 | int err; |
1537 | |
1538 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
1539 | VM_BUG_ON(!PAGE_ALIGNED(end)); |
1540 | |
1541 | if (end - start < PAGES_PER_SECTION * sizeof(struct page)) |
1542 | err = vmemmap_populate_basepages(start, end, node, NULL); |
1543 | else if (boot_cpu_has(X86_FEATURE_PSE)) |
1544 | err = vmemmap_populate_hugepages(start, end, node, altmap); |
1545 | else if (altmap) { |
1546 | pr_err_once("%s: no cpu support for altmap allocations\n" , |
1547 | __func__); |
1548 | err = -ENOMEM; |
1549 | } else |
1550 | err = vmemmap_populate_basepages(start, end, node, NULL); |
1551 | if (!err) |
1552 | sync_global_pgds(start, end - 1); |
1553 | return err; |
1554 | } |
1555 | |
1556 | #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE |
1557 | void register_page_bootmem_memmap(unsigned long section_nr, |
1558 | struct page *start_page, unsigned long nr_pages) |
1559 | { |
1560 | unsigned long addr = (unsigned long)start_page; |
1561 | unsigned long end = (unsigned long)(start_page + nr_pages); |
1562 | unsigned long next; |
1563 | pgd_t *pgd; |
1564 | p4d_t *p4d; |
1565 | pud_t *pud; |
1566 | pmd_t *pmd; |
1567 | unsigned int nr_pmd_pages; |
1568 | struct page *page; |
1569 | |
1570 | for (; addr < end; addr = next) { |
1571 | pte_t *pte = NULL; |
1572 | |
1573 | pgd = pgd_offset_k(addr); |
1574 | if (pgd_none(*pgd)) { |
1575 | next = (addr + PAGE_SIZE) & PAGE_MASK; |
1576 | continue; |
1577 | } |
1578 | get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO); |
1579 | |
1580 | p4d = p4d_offset(pgd, addr); |
1581 | if (p4d_none(*p4d)) { |
1582 | next = (addr + PAGE_SIZE) & PAGE_MASK; |
1583 | continue; |
1584 | } |
1585 | get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO); |
1586 | |
1587 | pud = pud_offset(p4d, addr); |
1588 | if (pud_none(*pud)) { |
1589 | next = (addr + PAGE_SIZE) & PAGE_MASK; |
1590 | continue; |
1591 | } |
1592 | get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); |
1593 | |
1594 | if (!boot_cpu_has(X86_FEATURE_PSE)) { |
1595 | next = (addr + PAGE_SIZE) & PAGE_MASK; |
1596 | pmd = pmd_offset(pud, addr); |
1597 | if (pmd_none(*pmd)) |
1598 | continue; |
1599 | get_page_bootmem(section_nr, pmd_page(*pmd), |
1600 | MIX_SECTION_INFO); |
1601 | |
1602 | pte = pte_offset_kernel(pmd, addr); |
1603 | if (pte_none(*pte)) |
1604 | continue; |
1605 | get_page_bootmem(section_nr, pte_page(*pte), |
1606 | SECTION_INFO); |
1607 | } else { |
1608 | next = pmd_addr_end(addr, end); |
1609 | |
1610 | pmd = pmd_offset(pud, addr); |
1611 | if (pmd_none(*pmd)) |
1612 | continue; |
1613 | |
1614 | nr_pmd_pages = 1 << get_order(PMD_SIZE); |
1615 | page = pmd_page(*pmd); |
1616 | while (nr_pmd_pages--) |
1617 | get_page_bootmem(section_nr, page++, |
1618 | SECTION_INFO); |
1619 | } |
1620 | } |
1621 | } |
1622 | #endif |
1623 | |
1624 | void __meminit vmemmap_populate_print_last(void) |
1625 | { |
1626 | if (p_start) { |
1627 | pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n" , |
1628 | addr_start, addr_end-1, p_start, p_end-1, node_start); |
1629 | p_start = NULL; |
1630 | p_end = NULL; |
1631 | node_start = 0; |
1632 | } |
1633 | } |
1634 | #endif |
1635 | |