1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * This file contains pgtable related functions for 64-bit machines. |
4 | * |
5 | * Derived from arch/ppc64/mm/init.c |
6 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
7 | * |
8 | * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org) |
9 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) |
10 | * Copyright (C) 1996 Paul Mackerras |
11 | * |
12 | * Derived from "arch/i386/mm/init.c" |
13 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
14 | * |
15 | * Dave Engebretsen <engebret@us.ibm.com> |
16 | * Rework for PPC64 port. |
17 | */ |
18 | |
19 | #include <linux/signal.h> |
20 | #include <linux/sched.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/errno.h> |
23 | #include <linux/string.h> |
24 | #include <linux/export.h> |
25 | #include <linux/types.h> |
26 | #include <linux/mman.h> |
27 | #include <linux/mm.h> |
28 | #include <linux/swap.h> |
29 | #include <linux/stddef.h> |
30 | #include <linux/vmalloc.h> |
31 | #include <linux/slab.h> |
32 | #include <linux/hugetlb.h> |
33 | |
34 | #include <asm/page.h> |
35 | #include <asm/mmu_context.h> |
36 | #include <asm/mmu.h> |
37 | #include <asm/smp.h> |
38 | #include <asm/machdep.h> |
39 | #include <asm/tlb.h> |
40 | #include <asm/processor.h> |
41 | #include <asm/cputable.h> |
42 | #include <asm/sections.h> |
43 | #include <asm/firmware.h> |
44 | #include <asm/dma.h> |
45 | |
46 | #include <mm/mmu_decl.h> |
47 | |
48 | |
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 *
 * On Book3S-64 the page-table geometry is not a compile-time constant:
 * these variables hold the per-level index/table sizes and related
 * layout parameters, presumably filled in during MMU setup depending
 * on whether the hash or radix MMU is in use (the init code is outside
 * this file — confirm against the hash/radix setup paths). They are
 * exported so that inline accessors in headers keep working when
 * referenced from modules.
 */
/* Bits of virtual address consumed at each page-table level. */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
/* Index size used for the PUD kmem cache (may differ from the above). */
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
/* Size in bytes of the table at each level. */
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
/* Valid software/hardware bits in mid/upper-level entries. */
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
/* Kernel virtual address-space layout: region start/end addresses. */
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
/* Base of the virtual memmap (struct page array). */
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
/* PTE-fragment parameters: fragments per page and fragment size shift. */
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif
98 | |
99 | #ifndef __PAGETABLE_PUD_FOLDED |
100 | /* 4 level page table */ |
101 | struct page *p4d_page(p4d_t p4d) |
102 | { |
103 | if (p4d_leaf(p4d)) { |
104 | if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) |
105 | VM_WARN_ON(!p4d_huge(p4d)); |
106 | return pte_page(p4d_pte(p4d)); |
107 | } |
108 | return virt_to_page(p4d_pgtable(p4d)); |
109 | } |
110 | #endif |
111 | |
112 | struct page *pud_page(pud_t pud) |
113 | { |
114 | if (pud_leaf(pud)) { |
115 | if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) |
116 | VM_WARN_ON(!pud_huge(pud)); |
117 | return pte_page(pud_pte(pud)); |
118 | } |
119 | return virt_to_page(pud_pgtable(pud)); |
120 | } |
121 | |
122 | /* |
123 | * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags |
124 | * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address. |
125 | */ |
126 | struct page *pmd_page(pmd_t pmd) |
127 | { |
128 | if (pmd_leaf(pmd)) { |
129 | /* |
130 | * vmalloc_to_page may be called on any vmap address (not only |
131 | * vmalloc), and it uses pmd_page() etc., when huge vmap is |
132 | * enabled so these checks can't be used. |
133 | */ |
134 | if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) |
135 | VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd))); |
136 | return pte_page(pmd_pte(pmd)); |
137 | } |
138 | return virt_to_page(pmd_page_vaddr(pmd)); |
139 | } |
140 | |
141 | #ifdef CONFIG_STRICT_KERNEL_RWX |
142 | void mark_rodata_ro(void) |
143 | { |
144 | if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) { |
145 | pr_warn("Warning: Unable to mark rodata read only on this CPU.\n" ); |
146 | return; |
147 | } |
148 | |
149 | if (radix_enabled()) |
150 | radix__mark_rodata_ro(); |
151 | else |
152 | hash__mark_rodata_ro(); |
153 | } |
154 | |
/*
 * Mark init memory non-executable, dispatching to the MMU-specific
 * (radix or hash) implementation.
 */
void mark_initmem_nx(void)
{
	if (!radix_enabled()) {
		hash__mark_initmem_nx();
		return;
	}

	radix__mark_initmem_nx();
}
162 | #endif |
163 | |