// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */

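/* Create and initialize the KMSAN context of a newly created task. */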
void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}

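/*
 * Disable reporting on behalf of an exiting task: its stack and context are
 * about to be reused, so late reports referencing them could be misleading.
 */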
void kmsan_task_exit(struct task_struct *task)
{
	struct kmsan_ctx *ctx = &task->kmsan_ctx;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ctx->allow_reporting = false;
}

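/*
 * Mark a freshly allocated slab object as uninitialized, unless __GFP_ZERO
 * guarantees it is zero-initialized, in which case mark it initialized.
 */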
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

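/*
 * Poison a slab object on free, so that subsequent reads of freed memory are
 * reported as uses of uninitialized values.
 */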
void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs, instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

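/* The kmsan_slab_alloc() counterpart for large (page-based) kmalloc(). */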
void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	if (unlikely(ptr == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory((void *)ptr, size,
					       /*checked*/ true);
	else
		kmsan_internal_poison_memory((void *)ptr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

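/* The kmsan_slab_free() counterpart for large (page-based) kmalloc(). */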
void kmsan_kfree_large(const void *ptr)
{
	struct page *page;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	page = virt_to_head_page((void *)ptr);
	KMSAN_WARN_ON(ptr != page_address(page));
	kmsan_internal_poison_memory((void *)ptr, page_size(page), GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

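/*
 * Helpers returning the addresses of the shadow/origin metadata corresponding
 * to a vmalloc address.
 */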
static unsigned long vmalloc_shadow(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_ORIGIN);
}

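/* Unmap the shadow and origin regions mirroring an unmapped vmalloc range. */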
void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	__vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
	__vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
	int nr, err = 0, clean = 0, mapped;

	if (!kmsan_enabled || kmsan_in_runtime())
		return 0;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
		if (!shadow || !origin) {
			err = -ENOMEM;
			goto ret;
		}
		mapped = __vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
		if (mapped) {
			err = mapped;
			goto ret;
		}
		shadow = NULL;
		mapped = __vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
		if (mapped) {
			__vunmap_range_noflush(
				vmalloc_shadow(start + off),
				vmalloc_shadow(start + off + PAGE_SIZE));
			err = mapped;
			goto ret;
		}
		origin = NULL;
	}
	/* Page mapping loop finished normally, nothing to clean up. */
	clean = 0;

ret:
	if (clean > 0) {
		/*
		 * Something went wrong. Clean up shadow/origin pages allocated
		 * on the last loop iteration, then delete mappings created
		 * during the previous iterations.
		 */
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
		__vunmap_range_noflush(
			vmalloc_shadow(start),
			vmalloc_shadow(start + clean * PAGE_SIZE));
		__vunmap_range_noflush(
			vmalloc_origin(start),
			vmalloc_origin(start + clean * PAGE_SIZE));
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
	return err;
}

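/* Unmap and free the shadow/origin pages backing an iounmap()ed range. */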
void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
	unsigned long v_shadow, v_origin;
	struct page *shadow, *origin;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	v_shadow = (unsigned long)vmalloc_shadow(start);
	v_origin = (unsigned long)vmalloc_origin(start);
	for (int i = 0; i < nr;
	     i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
		shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
		origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
		__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
		__vunmap_range_noflush(v_origin, vmalloc_origin(end));
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

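/*
 * Check the memory copied to userspace, or, if the destination is actually a
 * kernel address, copy its shadow instead. @left is the number of bytes that
 * copy_to_user() failed to copy.
 */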
void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * At this point we've copied the memory already. It's hard to check it
	 * before copying, as the size of the actually copied buffer is
	 * unknown.
	 */

	/* copy_to_user() may copy zero bytes. No need to check. */
	if (!to_copy)
		return;
	/* Or maybe copy_to_user() failed to copy anything. */
	if (to_copy <= left)
		return;

	ua_flags = user_access_save();
	if ((u64)to < TASK_SIZE) {
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	} else {
		/*
		 * Otherwise this is a kernel memory access. This happens when
		 * a compat syscall passes an argument allocated on the kernel
		 * stack to a real syscall.
		 * Don't check anything, just copy the shadow of the copied
		 * bytes.
		 */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
	}
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);

/* Helper function to check an URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ 0,
					    REASON_SUBMIT_URB);
	else
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);

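/*
 * DMA to the device reads the buffer, so it must be initialized: check it.
 * DMA from the device overwrites the buffer, making it initialized: unpoison
 * it. Bidirectional transfers require both actions.
 */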
static void kmsan_handle_dma_page(const void *addr, size_t size,
				  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		kmsan_internal_check_memory((void *)addr, size,
					    /*user_addr*/ 0, REASON_ANY);
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_TO_DEVICE:
		kmsan_internal_check_memory((void *)addr, size,
					    /*user_addr*/ 0, REASON_ANY);
		break;
	case DMA_FROM_DEVICE:
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_NONE:
		break;
	}
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	/*
	 * The kernel may occasionally give us adjacent DMA pages not belonging
	 * to the same allocation. Process them separately to avoid triggering
	 * internal KMSAN checks.
	 */
	while (size > 0) {
		page_offset = offset_in_page(addr);
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir);
		addr += to_go;
		size -= to_go;
	}
}

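/* Apply kmsan_handle_dma() to every entry of a scatterlist. */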
void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}

/* Functions from kmsan-checks.h follow. */

/*
 * To create an origin, kmsan_poison_memory() unwinds the stack and stores it
 * into the stack depot. This may cause deadlocks if done from within KMSAN
 * runtime, therefore we bail out if kmsan_in_runtime().
 */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_poison_memory((void *)address, size, flags,
				     KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

/*
 * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
 * runtime, because it does not trigger allocations or call instrumented code.
 */
void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
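
/*
 * A hypothetical usage sketch (not taken from this file): a driver whose
 * device fills a buffer through a channel KMSAN cannot observe may call
 *
 *	kmsan_unpoison_memory(buf, len);
 *
 * once the data has arrived, to tell KMSAN that buf is now initialized.
 */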

/*
 * Version of kmsan_unpoison_memory() called from IRQ entry functions.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}

void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;
	kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
				    REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);