1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * PowerPC version |
4 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
5 | * |
6 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) |
7 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) |
8 | * Copyright (C) 1996 Paul Mackerras |
9 | * |
10 | * Derived from "arch/i386/mm/init.c" |
11 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
12 | * |
13 | * Dave Engebretsen <engebret@us.ibm.com> |
14 | * Rework for PPC64 port. |
15 | */ |
16 | |
17 | #undef DEBUG |
18 | |
19 | #include <linux/signal.h> |
20 | #include <linux/sched.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/errno.h> |
23 | #include <linux/string.h> |
24 | #include <linux/types.h> |
25 | #include <linux/mman.h> |
26 | #include <linux/mm.h> |
27 | #include <linux/swap.h> |
28 | #include <linux/stddef.h> |
29 | #include <linux/vmalloc.h> |
30 | #include <linux/init.h> |
31 | #include <linux/delay.h> |
32 | #include <linux/highmem.h> |
33 | #include <linux/idr.h> |
34 | #include <linux/nodemask.h> |
35 | #include <linux/module.h> |
36 | #include <linux/poison.h> |
37 | #include <linux/memblock.h> |
38 | #include <linux/hugetlb.h> |
39 | #include <linux/slab.h> |
40 | #include <linux/of_fdt.h> |
41 | #include <linux/libfdt.h> |
42 | #include <linux/memremap.h> |
43 | #include <linux/memory.h> |
44 | |
45 | #include <asm/pgalloc.h> |
46 | #include <asm/page.h> |
47 | #include <asm/prom.h> |
48 | #include <asm/rtas.h> |
49 | #include <asm/io.h> |
50 | #include <asm/mmu_context.h> |
51 | #include <asm/mmu.h> |
52 | #include <linux/uaccess.h> |
53 | #include <asm/smp.h> |
54 | #include <asm/machdep.h> |
55 | #include <asm/tlb.h> |
56 | #include <asm/eeh.h> |
57 | #include <asm/processor.h> |
58 | #include <asm/mmzone.h> |
59 | #include <asm/cputable.h> |
60 | #include <asm/sections.h> |
61 | #include <asm/iommu.h> |
62 | #include <asm/vdso.h> |
63 | #include <asm/hugetlb.h> |
64 | |
65 | #include <mm/mmu_decl.h> |
66 | |
67 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
68 | /* |
69 | * Given an address within the vmemmap, determine the page that |
70 | * represents the start of the subsection it is within. Note that we have to |
71 | * do this by hand as the proffered address may not be correctly aligned. |
72 | * Subtraction of non-aligned pointers produces undefined results. |
73 | */ |
74 | static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr) |
75 | { |
76 | unsigned long start_pfn; |
77 | unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap)); |
78 | |
79 | /* Return the pfn of the start of the section. */ |
80 | start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK; |
81 | return pfn_to_page(start_pfn); |
82 | } |
83 | |
84 | /* |
85 | * Since memory is added in sub-section chunks, before creating a new vmemmap |
86 | * mapping, the kernel should check whether there is an existing memmap mapping |
87 | * covering the new subsection added. This is needed because kernel can map |
88 | * vmemmap area using 16MB pages which will cover a memory range of 16G. Such |
89 | * a range covers multiple subsections (2M) |
90 | * |
91 | * If any subsection in the 16G range mapped by vmemmap is valid we consider the |
92 | * vmemmap populated (There is a page table entry already present). We can't do |
93 | * a page table lookup here because with the hash translation we don't keep |
94 | * vmemmap details in linux page table. |
95 | */ |
96 | int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size) |
97 | { |
98 | struct page *start; |
	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;

	start = vmemmap_subsection_start(vmemmap_addr);
101 | |
102 | for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION) |
103 | /* |
104 | * pfn valid check here is intended to really check |
105 | * whether we have any subsection already initialized |
106 | * in this range. |
107 | */ |
108 | if (pfn_valid(page_to_pfn(start))) |
109 | return 1; |
110 | |
111 | return 0; |
112 | } |
113 | |
114 | /* |
115 | * vmemmap virtual address space management does not have a traditional page |
116 | * table to track which virtual struct pages are backed by physical mapping. |
117 | * The virtual to physical mappings are tracked in a simple linked list |
118 | * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at |
119 | * all times where as the 'next' list maintains the available |
120 | * vmemmap_backing structures which have been deleted from the |
121 | * 'vmemmap_global' list during system runtime (memory hotplug remove |
122 | * operation). The freed 'vmemmap_backing' structures are reused later when |
123 | * new requests come in without allocating fresh memory. This pointer also |
124 | * tracks the allocated 'vmemmap_backing' structures as we allocate one |
125 | * full page memory at a time when we dont have any. |
126 | */ |
127 | struct vmemmap_backing *vmemmap_list; |
128 | static struct vmemmap_backing *next; |
129 | |
130 | /* |
131 | * The same pointer 'next' tracks individual chunks inside the allocated |
132 | * full page during the boot time and again tracks the freed nodes during |
133 | * runtime. It is racy but it does not happen as they are separated by the |
134 | * boot process. Will create problem if some how we have memory hotplug |
135 | * operation during boot !! |
136 | */ |
137 | static int num_left; |
138 | static int num_freed; |
139 | |
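/*
 * Hand out a vmemmap_backing structure, reusing a previously freed entry if
 * one is available, otherwise carving the next chunk out of the current
 * backing page (allocating a fresh page when that runs out).
 */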
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
141 | { |
142 | struct vmemmap_backing *vmem_back; |
143 | /* get from freed entries first */ |
144 | if (num_freed) { |
145 | num_freed--; |
146 | vmem_back = next; |
147 | next = next->list; |
148 | |
149 | return vmem_back; |
150 | } |
151 | |
152 | /* allocate a page when required and hand out chunks */ |
153 | if (!num_left) { |
154 | next = vmemmap_alloc_block(PAGE_SIZE, node); |
155 | if (unlikely(!next)) { |
156 | WARN_ON(1); |
157 | return NULL; |
158 | } |
159 | num_left = PAGE_SIZE / sizeof(struct vmemmap_backing); |
160 | } |
161 | |
162 | num_left--; |
163 | |
164 | return next++; |
165 | } |
166 | |
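/*
 * Record a new virtual->physical vmemmap backing in 'vmemmap_list' so that
 * the mapping can later be looked up (and freed) on memory hot-remove.
 */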
167 | static __meminit int vmemmap_list_populate(unsigned long phys, |
168 | unsigned long start, |
169 | int node) |
170 | { |
171 | struct vmemmap_backing *vmem_back; |
172 | |
173 | vmem_back = vmemmap_list_alloc(node); |
174 | if (unlikely(!vmem_back)) { |
175 | pr_debug("vmemap list allocation failed\n" ); |
176 | return -ENOMEM; |
177 | } |
178 | |
179 | vmem_back->phys = phys; |
180 | vmem_back->virt_addr = start; |
181 | vmem_back->list = vmemmap_list; |
182 | |
183 | vmemmap_list = vmem_back; |
184 | return 0; |
185 | } |
186 | |
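/*
 * Return true if the pfn range described by the vmemmap chunk at 'start'
 * (page_size bytes worth of struct pages) extends beyond the pfns covered
 * by the altmap, in which case the chunk cannot be backed by altmap memory.
 */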
187 | bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start, |
188 | unsigned long page_size) |
189 | { |
190 | unsigned long nr_pfn = page_size / sizeof(struct page); |
191 | unsigned long start_pfn = page_to_pfn((struct page *)start); |
192 | |
193 | if ((start_pfn + nr_pfn - 1) > altmap->end_pfn) |
194 | return true; |
195 | |
196 | if (start_pfn < altmap->base_pfn) |
197 | return true; |
198 | |
199 | return false; |
200 | } |
201 | |
202 | static int __meminit __vmemmap_populate(unsigned long start, unsigned long end, int node, |
203 | struct vmem_altmap *altmap) |
204 | { |
205 | bool altmap_alloc; |
206 | unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; |
207 | |
208 | /* Align to the page size of the linear mapping. */ |
209 | start = ALIGN_DOWN(start, page_size); |
210 | |
211 | pr_debug("vmemmap_populate %lx..%lx, node %d\n" , start, end, node); |
212 | |
213 | for (; start < end; start += page_size) { |
214 | void *p = NULL; |
215 | int rc; |
216 | |
217 | /* |
218 | * This vmemmap range is backing different subsections. If any |
219 | * of that subsection is marked valid, that means we already |
220 | * have initialized a page table covering this range and hence |
221 | * the vmemmap range is populated. |
222 | */ |
		if (vmemmap_populated(start, page_size))
224 | continue; |
225 | |
226 | /* |
227 | * Allocate from the altmap first if we have one. This may |
228 | * fail due to alignment issues when using 16MB hugepages, so |
229 | * fall back to system memory if the altmap allocation fail. |
230 | */ |
231 | if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { |
232 | p = vmemmap_alloc_block_buf(size: page_size, node, altmap); |
233 | if (!p) |
234 | pr_debug("altmap block allocation failed, falling back to system memory" ); |
235 | else |
236 | altmap_alloc = true; |
237 | } |
238 | if (!p) { |
239 | p = vmemmap_alloc_block_buf(size: page_size, node, NULL); |
240 | altmap_alloc = false; |
241 | } |
242 | if (!p) |
243 | return -ENOMEM; |
244 | |
245 | if (vmemmap_list_populate(__pa(p), start, node)) { |
246 | /* |
247 | * If we don't populate vmemap list, we don't have |
248 | * the ability to free the allocated vmemmap |
249 | * pages in section_deactivate. Hence free them |
250 | * here. |
251 | */ |
252 | int nr_pfns = page_size >> PAGE_SHIFT; |
			unsigned long page_order = get_order(page_size);
254 | |
255 | if (altmap_alloc) |
256 | vmem_altmap_free(altmap, nr_pfns); |
257 | else |
				free_pages((unsigned long)p, page_order);
259 | return -ENOMEM; |
260 | } |
261 | |
262 | pr_debug(" * %016lx..%016lx allocated at %p\n" , |
263 | start, start + page_size, p); |
264 | |
265 | rc = vmemmap_create_mapping(start, page_size, __pa(p)); |
266 | if (rc < 0) { |
267 | pr_warn("%s: Unable to create vmemmap mapping: %d\n" , |
268 | __func__, rc); |
269 | return -EFAULT; |
270 | } |
271 | } |
272 | |
273 | return 0; |
274 | } |
275 | |
276 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, |
277 | struct vmem_altmap *altmap) |
278 | { |
279 | |
280 | #ifdef CONFIG_PPC_BOOK3S_64 |
281 | if (radix_enabled()) |
282 | return radix__vmemmap_populate(start, end, node, altmap); |
283 | #endif |
284 | |
285 | return __vmemmap_populate(start, end, node, altmap); |
286 | } |
287 | |
288 | #ifdef CONFIG_MEMORY_HOTPLUG |
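/*
 * Unlink the vmemmap_list entry for the mapping at 'start', push it onto the
 * free list tracked by 'next', and return the physical address it was backed
 * by (or 0 if no entry was found).
 */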
289 | static unsigned long vmemmap_list_free(unsigned long start) |
290 | { |
291 | struct vmemmap_backing *vmem_back, *vmem_back_prev; |
292 | |
293 | vmem_back_prev = vmem_back = vmemmap_list; |
294 | |
295 | /* look for it with prev pointer recorded */ |
296 | for (; vmem_back; vmem_back = vmem_back->list) { |
297 | if (vmem_back->virt_addr == start) |
298 | break; |
299 | vmem_back_prev = vmem_back; |
300 | } |
301 | |
302 | if (unlikely(!vmem_back)) |
303 | return 0; |
304 | |
305 | /* remove it from vmemmap_list */ |
306 | if (vmem_back == vmemmap_list) /* remove head */ |
307 | vmemmap_list = vmem_back->list; |
308 | else |
309 | vmem_back_prev->list = vmem_back->list; |
310 | |
311 | /* next point to this freed entry */ |
312 | vmem_back->list = next; |
313 | next = vmem_back; |
314 | num_freed++; |
315 | |
316 | return vmem_back->phys; |
317 | } |
318 | |
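/*
 * Tear down the vmemmap backing for [start, end): for each mapping chunk
 * that no longer covers any valid subsection, return the backing memory to
 * the altmap, the page allocator or the reserved pool it came from, and
 * remove the page table mapping.
 */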
319 | static void __ref __vmemmap_free(unsigned long start, unsigned long end, |
320 | struct vmem_altmap *altmap) |
321 | { |
322 | unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; |
	unsigned long page_order = get_order(page_size);
324 | unsigned long alt_start = ~0, alt_end = ~0; |
325 | unsigned long base_pfn; |
326 | |
327 | start = ALIGN_DOWN(start, page_size); |
328 | if (altmap) { |
329 | alt_start = altmap->base_pfn; |
330 | alt_end = altmap->base_pfn + altmap->reserve + altmap->free; |
331 | } |
332 | |
333 | pr_debug("vmemmap_free %lx...%lx\n" , start, end); |
334 | |
335 | for (; start < end; start += page_size) { |
336 | unsigned long nr_pages, addr; |
337 | struct page *page; |
338 | |
339 | /* |
340 | * We have already marked the subsection we are trying to remove |
341 | * invalid. So if we want to remove the vmemmap range, we |
342 | * need to make sure there is no subsection marked valid |
343 | * in this range. |
344 | */ |
		if (vmemmap_populated(start, page_size))
346 | continue; |
347 | |
348 | addr = vmemmap_list_free(start); |
349 | if (!addr) |
350 | continue; |
351 | |
352 | page = pfn_to_page(addr >> PAGE_SHIFT); |
353 | nr_pages = 1 << page_order; |
354 | base_pfn = PHYS_PFN(addr); |
355 | |
356 | if (base_pfn >= alt_start && base_pfn < alt_end) { |
			vmem_altmap_free(altmap, nr_pages);
358 | } else if (PageReserved(page)) { |
359 | /* allocated from bootmem */ |
360 | if (page_size < PAGE_SIZE) { |
361 | /* |
362 | * this shouldn't happen, but if it is |
363 | * the case, leave the memory there |
364 | */ |
365 | WARN_ON_ONCE(1); |
366 | } else { |
367 | while (nr_pages--) |
					free_reserved_page(page++);
369 | } |
370 | } else { |
			free_pages((unsigned long)(__va(addr)), page_order);
372 | } |
373 | |
374 | vmemmap_remove_mapping(start, page_size); |
375 | } |
376 | } |
377 | |
378 | void __ref vmemmap_free(unsigned long start, unsigned long end, |
379 | struct vmem_altmap *altmap) |
380 | { |
381 | #ifdef CONFIG_PPC_BOOK3S_64 |
382 | if (radix_enabled()) |
383 | return radix__vmemmap_free(start, end, altmap); |
384 | #endif |
385 | return __vmemmap_free(start, end, altmap); |
386 | } |
387 | |
388 | #endif |
389 | void register_page_bootmem_memmap(unsigned long section_nr, |
390 | struct page *start_page, unsigned long size) |
391 | { |
392 | } |
393 | |
394 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
395 | |
396 | #ifdef CONFIG_PPC_BOOK3S_64 |
397 | unsigned int mmu_lpid_bits; |
398 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
399 | EXPORT_SYMBOL_GPL(mmu_lpid_bits); |
400 | #endif |
401 | unsigned int mmu_pid_bits; |
402 | |
403 | static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); |
404 | |
405 | static int __init parse_disable_radix(char *p) |
406 | { |
407 | bool val; |
408 | |
409 | if (!p) |
410 | val = true; |
411 | else if (kstrtobool(p, &val)) |
412 | return -EINVAL; |
413 | |
414 | disable_radix = val; |
415 | |
416 | return 0; |
417 | } |
418 | early_param("disable_radix" , parse_disable_radix); |
419 | |
420 | /* |
421 | * If we're running under a hypervisor, we need to check the contents of |
422 | * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do |
423 | * radix. If not, we clear the radix feature bit so we fall back to hash. |
424 | */ |
425 | static void __init early_check_vec5(void) |
426 | { |
427 | unsigned long root, chosen; |
428 | int size; |
429 | const u8 *vec5; |
430 | u8 mmu_supported; |
431 | |
432 | root = of_get_flat_dt_root(); |
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
434 | if (chosen == -FDT_ERR_NOTFOUND) { |
435 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
436 | return; |
437 | } |
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
439 | if (!vec5) { |
440 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
441 | return; |
442 | } |
443 | if (size <= OV5_INDX(OV5_MMU_SUPPORT)) { |
444 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
445 | return; |
446 | } |
447 | |
448 | /* Check for supported configuration */ |
449 | mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & |
450 | OV5_FEAT(OV5_MMU_SUPPORT); |
451 | if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) { |
452 | /* Hypervisor only supports radix - check enabled && GTSE */ |
453 | if (!early_radix_enabled()) { |
454 | pr_warn("WARNING: Ignoring cmdline option disable_radix\n" ); |
455 | } |
456 | if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & |
457 | OV5_FEAT(OV5_RADIX_GTSE))) { |
458 | cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; |
459 | } else |
460 | cur_cpu_spec->mmu_features |= MMU_FTR_GTSE; |
461 | /* Do radix anyway - the hypervisor said we had to */ |
462 | cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; |
463 | } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { |
464 | /* Hypervisor only supports hash - disable radix */ |
465 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
466 | cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; |
467 | } |
468 | } |
469 | |
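/*
 * Read the LPID/PID register widths ("ibm,mmu-lpid-bits" and
 * "ibm,mmu-pid-bits") advertised by a "cpu" node in the flattened
 * device tree, and stop scanning once either has been found.
 */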
470 | static int __init dt_scan_mmu_pid_width(unsigned long node, |
471 | const char *uname, int depth, |
472 | void *data) |
473 | { |
474 | int size = 0; |
475 | const __be32 *prop; |
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
477 | |
478 | /* We are scanning "cpu" nodes only */ |
479 | if (type == NULL || strcmp(type, "cpu" ) != 0) |
480 | return 0; |
481 | |
482 | /* Find MMU LPID, PID register size */ |
483 | prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits" , &size); |
484 | if (prop && size == 4) |
485 | mmu_lpid_bits = be32_to_cpup(prop); |
486 | |
487 | prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits" , &size); |
488 | if (prop && size == 4) |
489 | mmu_pid_bits = be32_to_cpup(prop); |
490 | |
491 | if (!mmu_pid_bits && !mmu_lpid_bits) |
492 | return 0; |
493 | |
494 | return 1; |
495 | } |
496 | |
497 | /* |
498 | * Outside hotplug the kernel uses this value to map the kernel direct map |
499 | * with radix. To be compatible with older kernels, let's keep this value |
500 | * as 16M which is also SECTION_SIZE with SPARSEMEM. We can ideally map |
501 | * things with 1GB size in the case where we don't support hotplug. |
502 | */ |
503 | #ifndef CONFIG_MEMORY_HOTPLUG |
504 | #define DEFAULT_MEMORY_BLOCK_SIZE SZ_16M |
505 | #else |
506 | #define DEFAULT_MEMORY_BLOCK_SIZE MIN_MEMORY_BLOCK_SIZE |
507 | #endif |
508 | |
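/*
 * Shrink *block_size (dividing by 4 each step, down to the minimum block
 * size) until mem_size no longer has the candidate size's bit set, so the
 * region can still be mapped with the memory block size we end up using.
 */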
509 | static void update_memory_block_size(unsigned long *block_size, unsigned long mem_size) |
510 | { |
511 | unsigned long min_memory_block_size = DEFAULT_MEMORY_BLOCK_SIZE; |
512 | |
513 | for (; *block_size > min_memory_block_size; *block_size >>= 2) { |
514 | if ((mem_size & *block_size) == 0) |
515 | break; |
516 | } |
517 | } |
518 | |
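/*
 * of_scan_flat_dt() callback that derives the memory block size from the
 * device tree: prefer the LMB size from ibm,dynamic-reconfiguration-memory,
 * otherwise shrink the block size until every "memory" node can be covered.
 */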
519 | static int __init probe_memory_block_size(unsigned long node, const char *uname, int |
520 | depth, void *data) |
521 | { |
522 | const char *type; |
523 | unsigned long *block_size = (unsigned long *)data; |
524 | const __be32 *reg, *endp; |
525 | int l; |
526 | |
527 | if (depth != 1) |
528 | return 0; |
529 | /* |
530 | * If we have dynamic-reconfiguration-memory node, use the |
531 | * lmb value. |
532 | */ |
533 | if (strcmp(uname, "ibm,dynamic-reconfiguration-memory" ) == 0) { |
534 | |
535 | const __be32 *prop; |
536 | |
537 | prop = of_get_flat_dt_prop(node, "ibm,lmb-size" , &l); |
538 | |
539 | if (!prop || l < dt_root_size_cells * sizeof(__be32)) |
540 | /* |
541 | * Nothing in the device tree |
542 | */ |
543 | *block_size = DEFAULT_MEMORY_BLOCK_SIZE; |
544 | else |
545 | *block_size = of_read_number(prop, dt_root_size_cells); |
546 | /* |
547 | * We have found the final value. Don't probe further. |
548 | */ |
549 | return 1; |
550 | } |
551 | /* |
552 | * Find all the device tree nodes of memory type and make sure |
553 | * the area can be mapped using the memory block size value |
554 | * we end up using. We start with 1G value and keep reducing |
555 | * it such that we can map the entire area using memory_block_size. |
556 | * This will be used on powernv and older pseries that don't |
557 | * have ibm,lmb-size node. |
558 | * For ex: with P5 we can end up with |
559 | * memory@0 -> 128MB |
560 | * memory@128M -> 64M |
561 | * This will end up using 64MB memory block size value. |
562 | */ |
563 | type = of_get_flat_dt_prop(node, "device_type" , NULL); |
564 | if (type == NULL || strcmp(type, "memory" ) != 0) |
565 | return 0; |
566 | |
567 | reg = of_get_flat_dt_prop(node, "linux,usable-memory" , &l); |
568 | if (!reg) |
569 | reg = of_get_flat_dt_prop(node, "reg" , &l); |
570 | if (!reg) |
571 | return 0; |
572 | |
573 | endp = reg + (l / sizeof(__be32)); |
574 | while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { |
575 | const char *compatible; |
576 | u64 size; |
577 | |
578 | dt_mem_next_cell(dt_root_addr_cells, ®); |
579 | size = dt_mem_next_cell(dt_root_size_cells, ®); |
580 | |
581 | if (size) { |
582 | update_memory_block_size(block_size, size); |
583 | continue; |
584 | } |
585 | /* |
586 | * ibm,coherent-device-memory with linux,usable-memory = 0 |
587 | * Force 256MiB block size. Work around for GPUs on P9 PowerNV |
588 | * linux,usable-memory == 0 implies driver managed memory and |
589 | * we can't use large memory block size due to hotplug/unplug |
590 | * limitations. |
591 | */ |
592 | compatible = of_get_flat_dt_prop(node, "compatible" , NULL); |
593 | if (compatible && !strcmp(compatible, "ibm,coherent-device-memory" )) { |
594 | if (*block_size > SZ_256M) |
595 | *block_size = SZ_256M; |
596 | /* |
597 | * We keep 256M as the upper limit with GPU present. |
598 | */ |
599 | return 0; |
600 | } |
601 | } |
602 | /* continue looking for other memory device types */ |
603 | return 0; |
604 | } |
605 | |
606 | /* |
607 | * start with 1G memory block size. Early init will |
608 | * fix this with correct value. |
609 | */ |
610 | unsigned long memory_block_size __ro_after_init = 1UL << 30; |
611 | static void __init early_init_memory_block_size(void) |
612 | { |
613 | /* |
614 | * We need to do memory_block_size probe early so that |
615 | * radix__early_init_mmu() can use this as limit for |
616 | * mapping page size. |
617 | */ |
618 | of_scan_flat_dt(probe_memory_block_size, &memory_block_size); |
619 | } |
620 | |
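/*
 * Early MMU setup from the flattened device tree: pick between hash and
 * radix translation, establish the PID/LPID widths and memory block size,
 * and run the per-MMU early devtree initialisation.
 */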
621 | void __init mmu_early_init_devtree(void) |
622 | { |
623 | bool hvmode = !!(mfmsr() & MSR_HV); |
624 | |
625 | /* Disable radix mode based on kernel command line. */ |
626 | if (disable_radix) { |
627 | if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) |
628 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
629 | else |
630 | pr_warn("WARNING: Ignoring cmdline option disable_radix\n" ); |
631 | } |
632 | |
633 | of_scan_flat_dt(dt_scan_mmu_pid_width, NULL); |
634 | if (hvmode && !mmu_lpid_bits) { |
635 | if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) |
636 | mmu_lpid_bits = 12; /* POWER8-10 */ |
637 | else |
638 | mmu_lpid_bits = 10; /* POWER7 */ |
639 | } |
640 | if (!mmu_pid_bits) { |
641 | if (early_cpu_has_feature(CPU_FTR_ARCH_300)) |
642 | mmu_pid_bits = 20; /* POWER9-10 */ |
643 | } |
644 | |
645 | /* |
646 | * Check /chosen/ibm,architecture-vec-5 if running as a guest. |
647 | * When running bare-metal, we can use radix if we like |
648 | * even though the ibm,architecture-vec-5 property created by |
649 | * skiboot doesn't have the necessary bits set. |
650 | */ |
651 | if (!hvmode) |
652 | early_check_vec5(); |
653 | |
654 | early_init_memory_block_size(); |
655 | |
656 | if (early_radix_enabled()) { |
657 | radix__early_init_devtree(); |
658 | |
659 | /* |
660 | * We have finalized the translation we are going to use by now. |
661 | * Radix mode is not limited by RMA / VRMA addressing. |
662 | * Hence don't limit memblock allocations. |
663 | */ |
664 | ppc64_rma_size = ULONG_MAX; |
665 | memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); |
666 | } else |
667 | hash__early_init_devtree(); |
668 | |
669 | if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE)) |
670 | hugetlbpage_init_defaultsize(); |
671 | |
672 | if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) && |
673 | !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX)) |
674 | panic("kernel does not support any MMU type offered by platform" ); |
675 | } |
676 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
677 | |