// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN initialization routines.
 *
 * Copyright (C) 2017-2021 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include "kmsan.h"

#include <asm/sections.h>
#include <linux/mm.h>
#include <linux/memblock.h>

#include "../internal.h"

#define NUM_FUTURE_RANGES 128
struct start_end_pair {
	u64 start, end;
};

static struct start_end_pair start_end_pairs[NUM_FUTURE_RANGES] __initdata;
static int future_index __initdata;

/*
 * Record a range of memory for which the metadata pages will be created once
 * the page allocator becomes available.
 */
static void __init kmsan_record_future_shadow_range(void *start, void *end)
{
	u64 nstart = (u64)start, nend = (u64)end, cstart, cend;
	bool merged = false;

	KMSAN_WARN_ON(future_index == NUM_FUTURE_RANGES);
	KMSAN_WARN_ON((nstart >= nend) || !nstart || !nend);
	nstart = ALIGN_DOWN(nstart, PAGE_SIZE);
	nend = ALIGN(nend, PAGE_SIZE);

	/*
	 * Scan the existing ranges to see if any of them overlaps with
	 * [start, end). In that case, merge the two ranges instead of
	 * creating a new one.
	 * The number of ranges is less than 20, so there is no need to organize
	 * them into a more intelligent data structure.
	 */
	for (int i = 0; i < future_index; i++) {
		cstart = start_end_pairs[i].start;
		cend = start_end_pairs[i].end;
		if ((cstart < nstart && cend < nstart) ||
		    (cstart > nend && cend > nend))
			/* ranges are disjoint - do not merge */
			continue;
		start_end_pairs[i].start = min(nstart, cstart);
		start_end_pairs[i].end = max(nend, cend);
		merged = true;
		break;
	}
	if (merged)
		return;
	start_end_pairs[future_index].start = nstart;
	start_end_pairs[future_index].end = nend;
	future_index++;
}
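
/*
 * Worked example (illustrative addresses, assuming 4K pages): recording
 * [0x1000, 0x2800) first page-aligns it to [0x1000, 0x3000). Recording
 * [0x2000, 0x4000) afterwards overlaps that existing range, so the two are
 * merged into a single [0x1000, 0x4000) entry instead of taking a second
 * slot in start_end_pairs[].
 */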

/*
 * Initialize the shadow for existing mappings during kernel initialization.
 * These include kernel text/data sections, NODE_DATA and future ranges
 * registered while creating other data (e.g. percpu).
 *
 * Allocations via memblock can be only done before slab is initialized.
 */
void __init kmsan_init_shadow(void)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	phys_addr_t p_start, p_end;
	u64 loop;
	int nid;

	for_each_reserved_mem_range(loop, &p_start, &p_end)
		kmsan_record_future_shadow_range(phys_to_virt(p_start),
						 phys_to_virt(p_end));
	/* Allocate shadow for .data */
	kmsan_record_future_shadow_range(_sdata, _edata);

	for_each_online_node(nid)
		kmsan_record_future_shadow_range(
			NODE_DATA(nid), (char *)NODE_DATA(nid) + nd_size);

	for (int i = 0; i < future_index; i++)
		kmsan_init_alloc_meta_for_range(
			(void *)start_end_pairs[i].start,
			(void *)start_end_pairs[i].end);
}

struct metadata_page_pair {
	struct page *shadow, *origin;
};
static struct metadata_page_pair held_back[NR_PAGE_ORDERS] __initdata;

/*
 * Eager metadata allocation. When the memblock allocator is freeing pages to
 * pagealloc, we use 2/3 of them as metadata for the remaining 1/3.
 * We store the pointers to the returned blocks of pages in held_back[] grouped
 * by their order: when kmsan_memblock_free_pages() is called for the first
 * time with a certain order, it is reserved as a shadow block, for the second
 * time - as an origin block. On the third time the incoming block receives its
 * shadow and origin ranges from the previously saved shadow and origin blocks,
 * after which held_back[order] can be used again.
 *
 * At the very end there may be leftover blocks in held_back[]. They are
 * collected later by kmsan_memblock_discard().
 */
bool kmsan_memblock_free_pages(struct page *page, unsigned int order)
{
	struct page *shadow, *origin;

	if (!held_back[order].shadow) {
		held_back[order].shadow = page;
		return false;
	}
	if (!held_back[order].origin) {
		held_back[order].origin = page;
		return false;
	}
	shadow = held_back[order].shadow;
	origin = held_back[order].origin;
	kmsan_setup_meta(page, shadow, origin, order);

	held_back[order].shadow = NULL;
	held_back[order].origin = NULL;
	return true;
}
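
/*
 * Example with hypothetical same-order blocks A, B and C: the first call,
 * kmsan_memblock_free_pages(A, 3), holds A back as a shadow block and returns
 * false, so the caller must not release A to the page allocator. The second
 * call holds B back as an origin block, also returning false. The third call
 * wires A and B up as metadata for C via kmsan_setup_meta() and returns true,
 * so only C is actually freed.
 */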

#define MAX_BLOCKS 8
struct smallstack {
	struct page *items[MAX_BLOCKS];
	int index;
	int order;
};

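/*
 * Same-order page blocks queued for metadata assignment by
 * kmsan_memblock_discard() below.
 */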
static struct smallstack collect = {
	.index = 0,
	.order = MAX_PAGE_ORDER,
};

static void smallstack_push(struct smallstack *stack, struct page *pages)
{
	KMSAN_WARN_ON(stack->index == MAX_BLOCKS);
	stack->items[stack->index] = pages;
	stack->index++;
}
#undef MAX_BLOCKS

static struct page *smallstack_pop(struct smallstack *stack)
{
	struct page *ret;

	KMSAN_WARN_ON(stack->index == 0);
	stack->index--;
	ret = stack->items[stack->index];
	stack->items[stack->index] = NULL;
	return ret;
}

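/*
 * Take blocks off @collect three at a time: use two of them as shadow and
 * origin metadata for the third, then release that third block to the page
 * allocator.
 */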
static void do_collection(void)
{
	struct page *page, *shadow, *origin;

	while (collect.index >= 3) {
		page = smallstack_pop(&collect);
		shadow = smallstack_pop(&collect);
		origin = smallstack_pop(&collect);
		kmsan_setup_meta(page, shadow, origin, collect.order);
		__free_pages_core(page, collect.order);
	}
}

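/*
 * Halve the order of @collect by splitting each remaining block in two. At
 * most two blocks survive do_collection(), so the split yields at most four
 * entries, staying well within MAX_BLOCKS.
 */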
static void collect_split(void)
{
	struct smallstack tmp = {
		.order = collect.order - 1,
		.index = 0,
	};
	struct page *page;

	if (!collect.order)
		return;
	while (collect.index) {
		page = smallstack_pop(&collect);
		smallstack_push(&tmp, &page[0]);
		smallstack_push(&tmp, &page[1 << tmp.order]);
	}
	__memcpy(&collect, &tmp, sizeof(tmp));
}

/*
 * Memblock is about to go away. Split the page blocks left over in held_back[]
 * and return 1/3 of that memory to the system.
 */
static void kmsan_memblock_discard(void)
{
	/*
	 * For each order=N:
	 * - push held_back[N].shadow and .origin to @collect;
	 * - while there are >= 3 elements in @collect, do garbage collection:
	 *   - pop 3 ranges from @collect;
	 *   - use two of them as shadow and origin for the third one;
	 *   - repeat;
	 * - split each remaining element from @collect into 2 ranges of
	 *   order=N-1,
	 * - repeat.
	 */
	collect.order = MAX_PAGE_ORDER;
	for (int i = MAX_PAGE_ORDER; i >= 0; i--) {
		if (held_back[i].shadow)
			smallstack_push(&collect, held_back[i].shadow);
		if (held_back[i].origin)
			smallstack_push(&collect, held_back[i].origin);
		held_back[i].shadow = NULL;
		held_back[i].origin = NULL;
		do_collection();
		collect_split();
	}
}
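
/*
 * Worked example (hypothetical leftovers): if only a shadow block of order 10
 * was held back, it cannot form a triple on its own, so collect_split() turns
 * it into two order-9 blocks; together with any order-9 leftovers these may
 * complete a triple on the next iteration. In the worst case at most two
 * order-0 pages remain unused at the end.
 */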

void __init kmsan_init_runtime(void)
{
	/* Assuming current is init_task */
	kmsan_internal_task_create(current);
	kmsan_memblock_discard();
	pr_info("Starting KernelMemorySanitizer\n");
	pr_info("ATTENTION: KMSAN is a debugging tool! Do not use it on production machines!\n");
	kmsan_enabled = true;
}