// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM.
 *
 * Copyright (c) 2018 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 * Author: Linus Walleij <linus.walleij@linaro.org>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/proc-fns.h>

#include "mm.h"

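/*
 * The temporary page directories below are only used by kasan_init():
 * swapper_pg_dir is copied into tmp_pgd_table (and, on LPAE, the PMD table
 * covering the shadow region into tmp_pmd_table) so that the early shadow
 * stays mapped while the real shadow mappings are built, as described in
 * the comment in kasan_init().
 */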
static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;

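/*
 * Shadow memory is allocated from memblock above MAX_DMA_ADDRESS and is
 * excluded from kmemleak tracking (MEMBLOCK_ALLOC_NOLEAKTRACE). The _raw
 * variant returns uninitialized memory; its caller fills the page with
 * KASAN_SHADOW_INIT itself, while kasan_alloc_block() returns zeroed memory
 * for page table blocks.
 */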
static __init void *kasan_alloc_block_raw(size_t size)
{
	return memblock_alloc_try_nid_raw(size, size, __pa(MAX_DMA_ADDRESS),
					  MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
}

static __init void *kasan_alloc_block(size_t size)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
}

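/*
 * Populate the PTEs under *pmdp for [addr, end). During the early pass each
 * none PTE is pointed at the shared kasan_early_shadow_page; during the late
 * pass a fresh shadow page is allocated, filled with KASAN_SHADOW_INIT and
 * mapped with PAGE_KERNEL permissions.
 */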
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, bool early)
{
	unsigned long next;
	pte_t *ptep = pte_offset_kernel(pmdp, addr);

	do {
		pte_t entry;
		void *p;

		next = addr + PAGE_SIZE;

		if (!early) {
			if (!pte_none(READ_ONCE(*ptep)))
				continue;

			p = kasan_alloc_block_raw(PAGE_SIZE);
			if (!p) {
				panic("%s failed to allocate shadow page for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
			entry = pfn_pte(virt_to_pfn(p),
					__pgprot(pgprot_val(PAGE_KERNEL)));
		} else if (pte_none(READ_ONCE(*ptep))) {
			/*
			 * The early shadow memory is mapping all KASan
			 * operations to one and the same page in memory,
			 * "kasan_early_shadow_page" so that the instrumentation
			 * will work on a scratch area until we can set up the
			 * proper KASan shadow memory.
			 */
			entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
					__pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
		} else {
			/*
			 * Early shadow mappings are PMD_SIZE aligned, so if the
			 * first entry is already set, they must all be set.
			 */
			return;
		}

		set_pte_at(&init_mm, addr, ptep, entry);
	} while (ptep++, addr = next, addr != end);
}

/*
 * The pmd (page middle directory) is only used on LPAE
 */
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, bool early)
{
	unsigned long next;
	pmd_t *pmdp = pmd_offset(pudp, addr);

	do {
		if (pmd_none(*pmdp)) {
			/*
			 * We attempt to allocate a shadow block for the PMDs
			 * used by the PTEs for this address if it isn't already
			 * allocated.
			 */
			void *p = early ? kasan_early_shadow_pte :
				kasan_alloc_block(PAGE_SIZE);

			if (!p) {
				panic("%s failed to allocate shadow block for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			pmd_populate_kernel(&init_mm, pmdp, p);
			flush_pmd_entry(pmdp);
		}

		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, early);
	} while (pmdp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      bool early)
{
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;

	pgdp = pgd_offset_k(addr);

	do {
		/*
		 * Allocate and populate the shadow block of p4d folded into
		 * pud folded into pmd if it doesn't already exist
		 */
		if (!early && pgd_none(*pgdp)) {
			void *p = kasan_alloc_block(PAGE_SIZE);

			if (!p) {
				panic("%s failed to allocate shadow block for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			pgd_populate(&init_mm, pgdp, p);
		}

		next = pgd_addr_end(addr, end);
		/*
		 * We just immediately jump over the p4d and pud page
		 * directories since we believe ARM32 will never gain four-
		 * or five-level page tables.
		 */
		p4dp = p4d_offset(pgdp, addr);
		pudp = pud_offset(p4dp, addr);

		kasan_pmd_populate(pudp, addr, next, early);
	} while (pgdp++, addr = next, addr != end);
}

extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init kasan_early_init(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (list) {
#ifdef MULTI_CPU
		processor = *list->proc;
#endif
	}

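	/*
	 * Generic KASAN maps each 8 bytes of address space onto one shadow
	 * byte, so shadowing the whole 32-bit address space takes
	 * 4 GiB / 8 = 1 << 29 bytes (512 MiB). The check below asserts that
	 * KASAN_SHADOW_OFFSET and KASAN_SHADOW_END agree with that layout.
	 */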
	BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);
	/*
	 * We walk the page table and set all of the shadow memory to point
	 * to the scratch page.
	 */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true);
}

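/*
 * Tear down the early shadow mappings by clearing the PMD entries covering
 * [start, end) so that the proper shadow mappings can be installed.
 */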
static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	for (; start && start < end; start += PMD_SIZE)
		pmd_clear(pmd_off_k(start));
}

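/*
 * Map real shadow memory for the kernel virtual address range [start, end).
 * kasan_mem_to_shadow() translates an address to its shadow as
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, so each shadow
 * byte covers eight bytes of the mapped range.
 */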
static int __init create_mapping(void *start, void *end)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = kasan_mem_to_shadow(end);

	pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n",
		start, end, shadow_start, shadow_end);

	kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK,
			   PAGE_ALIGN((unsigned long)shadow_end), false);
	return 0;
}

void __init kasan_init(void)
{
	phys_addr_t pa_start, pa_end;
	u64 i;

	/*
	 * We are going to perform proper setup of shadow memory.
	 *
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory.
	 *
	 * To keep the early shadow memory MMU tables around while setting up
	 * the proper shadow memory, we copy swapper_pg_dir (the initial page
	 * table) to tmp_pgd_table and use that to keep the early shadow memory
	 * mapped until the full shadow setup is finished. Then we swap back
	 * to the proper swapper_pg_dir.
	 */

	memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
#ifdef CONFIG_ARM_LPAE
	/* We need to be in the same PGD or this won't work */
	BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
		     pgd_index(KASAN_SHADOW_END));
	memcpy(tmp_pmd_table,
	       (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
	       sizeof(tmp_pmd_table));
	set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
		__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
#endif
	cpu_switch_mm(tmp_pgd_table, &init_mm);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

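	/*
	 * The vmalloc area and the rest of the address space above it are
	 * backed by the read-only zero shadow page rather than real shadow.
	 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is instead populated
	 * on demand by kasan_populate_vmalloc(), so only the range above
	 * VMALLOC_END is covered here in that case.
	 */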
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
					    kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
				    kasan_mem_to_shadow((void *)-1UL) + 1);

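	/*
	 * Allocate and map real shadow for every lowmem memblock region.
	 * Highmem cannot be shadowed here because it has no permanent kernel
	 * mapping, so blocks above arm_lowmem_limit are skipped or truncated.
	 */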
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = __va(pa_start);
		void *end = __va(pa_end);

		/* Do not attempt to shadow highmem */
		if (pa_start >= arm_lowmem_limit) {
			pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end);
			continue;
		}
		if (pa_end > arm_lowmem_limit) {
			pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n",
				&pa_start, &pa_end, &arm_lowmem_limit);
			end = __va(arm_lowmem_limit);
		}
		if (start >= end) {
			pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n",
				&pa_start, &pa_end, start, end);
			continue;
		}

		create_mapping(start, end);
	}

	/*
	 * 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
	 *    so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
	 *    VMALLOC support KASAN will manage this region dynamically,
	 *    refer to kasan_populate_vmalloc() and ARM's implementation of
	 *    module_alloc().
	 * 2. The shadow of PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE and the shadow of
	 *    MODULES_VADDR ~ MODULES_END fall within the same PMD_SIZE block,
	 *    so we can't use kasan_populate_early_shadow for them.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
		create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
	create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly, so
	 * we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
			   &kasan_early_shadow_pte[i],
			   pfn_pte(virt_to_pfn(kasan_early_shadow_page),
				   __pgprot(pgprot_val(PAGE_KERNEL)
					    | L_PTE_RDONLY)));

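	/*
	 * The real shadow mappings were installed in swapper_pg_dir, so switch
	 * back to it and flush the stale early-shadow translations.
	 */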
	cpu_switch_mm(swapper_pg_dir, &init_mm);
	local_flush_tlb_all();

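	/*
	 * Clear the shared early shadow page: a shadow value of 0 marks the
	 * corresponding 8-byte granule as fully accessible, which is what the
	 * zero-shadow ranges mapped above rely on.
	 */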
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	pr_info("Kernel address sanitizer initialized\n");
	init_task.kasan_depth = 0;
}