// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks: add "unconst" to all the const copies,
 * and make sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}

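/*
 * Build a deeper stack frame and return a pointer into the middle of
 * its local buffer. By the time the caller uses that pointer, this
 * frame is dead, so the pointer falls outside any live stack frame.
 */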
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++)
		buf[i] = value & 0xff;

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

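/*
 * Perform a user/kernel copy against a stack address that hardened
 * usercopy should reject: either a pointer into a dead callee frame
 * (bad_frame == true) or a pointer to the very end of the thread
 * stack (bad_frame == false). The direction is selected by to_user.
 */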
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There is no way to read into the live end of the stack
		 * that is not covered by hardened usercopy, and an
		 * undetected copy would corrupt live stack data, so only
		 * test copy_from_user() against the dead callee frame.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_slab_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	/* Starting 16 bytes in, a full-"size" copy overruns the object by 16 bytes. */
	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_slab_size() tests will pass too.
 */
static void do_usercopy_slab_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer that contains a whitelisted window.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy(). */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
	do_usercopy_slab_size(true);
}

static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
	do_usercopy_slab_size(false);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
	do_usercopy_slab_whitelist(true);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
	do_usercopy_slab_whitelist(false);
}

static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

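/*
 * Copy kernel rodata (fine) and then kernel text (should be rejected)
 * out to userspace: hardened usercopy refuses to expose kernel text.
 */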
static void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
 * a more complete test that would include copy_from_user() would risk
 * memory corruption. Just test copy_to_user() here, as that exercises
 * almost exactly the same code paths.
 */
static void do_usercopy_page_span(const char *name, void *kaddr)
{
	unsigned long uaddr;

	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (uaddr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	/* Initialize contents. */
	memset(kaddr, 0xAA, PAGE_SIZE);

	/* Bump the kaddr forward to detect a page-spanning overflow. */
	kaddr += PAGE_SIZE / 2;

	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr,
			 unconst + (PAGE_SIZE / 2))) {
		pr_err("copy_to_user() failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
		goto free_user;
	}

	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(uaddr, PAGE_SIZE);
}

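/* Check page-span detection against a single vmalloc'd page. */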
static void lkdtm_USERCOPY_VMALLOC(void)
{
	void *addr;

	addr = vmalloc(PAGE_SIZE);
	if (!addr) {
		pr_err("vmalloc() failed!?\n");
		return;
	}
	do_usercopy_page_span("vmalloc", addr);
	vfree(addr);
}

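/* Check page-span detection against the tail page of a 2-page folio. */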
static void lkdtm_USERCOPY_FOLIO(void)
{
	struct folio *folio;
	void *addr;

	/*
	 * FIXME: Folio checking currently misses 0-order allocations, so
	 * allocate and bump forward to the last page.
	 */
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
	if (!folio) {
		pr_err("folio_alloc() failed!?\n");
		return;
	}
	addr = folio_address(folio);
	if (addr)
		do_usercopy_page_span("folio", addr + PAGE_SIZE);
	else
		pr_err("folio_address() failed?!\n");
	folio_put(folio);
}

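/*
 * Create the cache used by the SLAB_WHITELIST tests. The window set up
 * here (useroffset = cache_size / 4, usersize = cache_size / 16) must
 * match the offset and size probed by do_usercopy_slab_whitelist().
 */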
void __init lkdtm_usercopy_init(void)
{
	/* Prepare a cache that carries a usercopy whitelist window. */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}


static struct crashtype crashtypes[] = {
	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
	CRASHTYPE(USERCOPY_STACK_BEYOND),
	CRASHTYPE(USERCOPY_VMALLOC),
	CRASHTYPE(USERCOPY_FOLIO),
	CRASHTYPE(USERCOPY_KERNEL),
};

struct crashtype_category usercopy_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};
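
/*
 * Note: with CONFIG_LKDTM enabled, these crashtypes are normally triggered
 * through lkdtm's debugfs interface, e.g.:
 *
 *   echo USERCOPY_SLAB_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * With CONFIG_HARDENED_USERCOPY enabled, each "bad" copy above should be
 * caught and reported rather than silently completing.
 */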