/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
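
/*
 * Illustration only (not part of the kernel API): with the generic mode's
 * usual KASAN_SHADOW_SCALE_SHIFT of 3, one shadow byte covers an 8-byte
 * granule of memory, so adjacent granules map to adjacent shadow bytes:
 *
 *	char buf[16];
 *	void *s0 = kasan_mem_to_shadow(buf);		// shadow byte for buf[0..7]
 *	void *s1 = kasan_mem_to_shadow(buf + 8);	// s0 + 1, for buf[8..15]
 *
 * The software tag-based mode typically uses a shift of 4 (16-byte granules).
 */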

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
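
/*
 * Sketch of the intended pairing (slab-internal use; the metadata helper is
 * hypothetical): the object is only touched while temporarily unpoisoned and
 * is repoisoned right afterwards, as required by the comments above:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	setup_object_metadata(cache, object);	// hypothetical helper
 *	kasan_poison_new_object(cache, object);
 */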

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
					size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
					size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
					size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
					unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
					unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
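
/*
 * Sketch of how a page-caching user (e.g. a mempool-like subsystem) is meant
 * to pair the two helpers above; the pool helpers are hypothetical:
 *
 *	// Caching a page allocation instead of freeing it:
 *	if (kasan_mempool_poison_pages(page, order))
 *		add_pages_to_pool(pool, page);		// hypothetical helper
 *	// (a false return means the allocation is buggy and must not be reused)
 *
 *	// Reusing a cached page allocation:
 *	page = take_pages_from_pool(pool);		// hypothetical helper
 *	kasan_mempool_unpoison_pages(page, order);
 */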

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
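
/*
 * Sketch of the matching pattern for slab allocations (the pool helpers and
 * elem_size are hypothetical; @size is whatever amount the caller wants
 * usable again, see "Size to be unpoisoned" above):
 *
 *	// Caching an element instead of freeing it:
 *	if (kasan_mempool_poison_object(ptr))
 *		add_object_to_pool(pool, ptr);		// hypothetical helper
 *
 *	// Reusing a cached element:
 *	ptr = take_object_from_pool(pool);		// hypothetical helper
 *	kasan_mempool_unpoison_object(ptr, elem_size);
 */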

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
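
/*
 * Illustration only: a caller can probe whether an address is currently
 * accessible before dereferencing it; KASAN prints the report itself when the
 * check fails, and the wrapper above simply returns true when KASAN is
 * disabled:
 *
 *	if (!kasan_check_byte(ptr))
 *		return;			// ptr is poisoned (e.g. freed memory)
 *	use_object(ptr);		// hypothetical
 */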

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
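
/*
 * Illustration only: under the tag-based modes a pointer derived via a
 * different path (e.g. from the page address) may carry a different top-byte
 * tag than the one returned by the allocator, so address comparisons are done
 * on untagged values:
 *
 *	if (kasan_reset_tag(a) == kasan_reset_tag(b))
 *		...;		// same underlying address, tags ignored
 */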

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* _LINUX_KASAN_H */