// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
6 | |
7 | static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page, |
8 | unsigned long addr, unsigned long end) |
9 | { |
10 | addr &= PMD_MASK; |
11 | for (; addr < end; addr += PMD_SIZE) { |
12 | pmd_t *pmd = pmd_page + pmd_index(address: addr); |
13 | |
14 | if (pmd_present(pmd: *pmd)) |
15 | continue; |
16 | |
17 | set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag)); |
18 | } |
19 | } |
20 | |
21 | static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, |
22 | unsigned long addr, unsigned long end) |
23 | { |
24 | unsigned long next; |
25 | |
26 | for (; addr < end; addr = next) { |
27 | pud_t *pud = pud_page + pud_index(address: addr); |
28 | pmd_t *pmd; |
29 | |
30 | next = (addr & PUD_MASK) + PUD_SIZE; |
31 | if (next > end) |
32 | next = end; |
33 | |
34 | if (info->direct_gbpages) { |
35 | pud_t pudval; |
36 | |
37 | if (pud_present(pud: *pud)) |
38 | continue; |
39 | |
40 | addr &= PUD_MASK; |
41 | pudval = __pud((addr - info->offset) | info->page_flag); |
42 | set_pud(pud, pudval); |
43 | continue; |
44 | } |
45 | |
46 | if (pud_present(pud: *pud)) { |
47 | pmd = pmd_offset(pud, address: 0); |
48 | ident_pmd_init(info, pmd_page: pmd, addr, end: next); |
49 | continue; |
50 | } |
51 | pmd = (pmd_t *)info->alloc_pgt_page(info->context); |
52 | if (!pmd) |
53 | return -ENOMEM; |
54 | ident_pmd_init(info, pmd_page: pmd, addr, end: next); |
55 | set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag)); |
56 | } |
57 | |
58 | return 0; |
59 | } |
60 | |
61 | static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page, |
62 | unsigned long addr, unsigned long end) |
63 | { |
64 | unsigned long next; |
65 | int result; |
66 | |
67 | for (; addr < end; addr = next) { |
68 | p4d_t *p4d = p4d_page + p4d_index(address: addr); |
69 | pud_t *pud; |
70 | |
71 | next = (addr & P4D_MASK) + P4D_SIZE; |
72 | if (next > end) |
73 | next = end; |
74 | |
75 | if (p4d_present(p4d: *p4d)) { |
76 | pud = pud_offset(p4d, address: 0); |
77 | result = ident_pud_init(info, pud_page: pud, addr, end: next); |
78 | if (result) |
79 | return result; |
80 | |
81 | continue; |
82 | } |
83 | pud = (pud_t *)info->alloc_pgt_page(info->context); |
84 | if (!pud) |
85 | return -ENOMEM; |
86 | |
87 | result = ident_pud_init(info, pud_page: pud, addr, end: next); |
88 | if (result) |
89 | return result; |
90 | |
91 | set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag)); |
92 | } |
93 | |
94 | return 0; |
95 | } |
96 | |
97 | int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, |
98 | unsigned long pstart, unsigned long pend) |
99 | { |
100 | unsigned long addr = pstart + info->offset; |
101 | unsigned long end = pend + info->offset; |
102 | unsigned long next; |
103 | int result; |
104 | |
105 | /* Set the default pagetable flags if not supplied */ |
106 | if (!info->kernpg_flag) |
107 | info->kernpg_flag = _KERNPG_TABLE; |
108 | |
109 | /* Filter out unsupported __PAGE_KERNEL_* bits: */ |
110 | info->kernpg_flag &= __default_kernel_pte_mask; |
111 | |
112 | for (; addr < end; addr = next) { |
113 | pgd_t *pgd = pgd_page + pgd_index(addr); |
114 | p4d_t *p4d; |
115 | |
116 | next = (addr & PGDIR_MASK) + PGDIR_SIZE; |
117 | if (next > end) |
118 | next = end; |
119 | |
120 | if (pgd_present(pgd: *pgd)) { |
121 | p4d = p4d_offset(pgd, address: 0); |
122 | result = ident_p4d_init(info, p4d_page: p4d, addr, end: next); |
123 | if (result) |
124 | return result; |
125 | continue; |
126 | } |
127 | |
128 | p4d = (p4d_t *)info->alloc_pgt_page(info->context); |
129 | if (!p4d) |
130 | return -ENOMEM; |
131 | result = ident_p4d_init(info, p4d_page: p4d, addr, end: next); |
132 | if (result) |
133 | return result; |
134 | if (pgtable_l5_enabled()) { |
135 | set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag)); |
136 | } else { |
137 | /* |
138 | * With p4d folded, pgd is equal to p4d. |
139 | * The pgd entry has to point to the pud page table in this case. |
140 | */ |
141 | pud_t *pud = pud_offset(p4d, address: 0); |
142 | set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag)); |
143 | } |
144 | } |
145 | |
146 | return 0; |
147 | } |
148 | |