/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/pgtable.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

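/*
 * Illustrative sketch (not part of this header) of how a KUnit test is
 * expected to use the struct above: set report_expected, trigger one bad
 * access, then check report_found. How fail_data is made visible to the
 * report path is test-harness specific and omitted here.
 *
 *	struct kunit_kasan_expectation fail_data = {
 *		.report_expected = true,
 *	};
 *	((volatile char *)ptr)[size] = 'x';	// out-of-bounds write
 *	KUNIT_EXPECT_TRUE(test, fail_data.report_found);
 */
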
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

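/*
 * Worked example for generic KASAN, where KASAN_SHADOW_SCALE_SHIFT is 3:
 * one shadow byte tracks an 8-byte granule of real memory, so
 *
 *	shadow = (addr >> 3) + KASAN_SHADOW_OFFSET;
 *
 * A shadow byte of 0 means the whole granule is addressable, a value of
 * 1..7 means only the first N bytes are, and negative values encode the
 * various poison types (redzone, freed object, etc.).
 */
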
/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

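/*
 * Usage sketch (illustrative): wrap a section that legitimately touches
 * poisoned memory, e.g. a debug routine that dumps redzones. The calls
 * must stay balanced; the depth is tracked per task in task_struct.
 *
 *	kasan_disable_current();
 *	dump_object(obj);	// hypothetical helper; may read redzones
 *	kasan_enable_current();
 */
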
void kasan_unpoison_shadow(const void *address, size_t size);

void kasan_unpoison_task_stack(struct task_struct *task);

void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
					gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags);
void * __must_check kasan_krealloc(const void *object, size_t new_size,
				gfp_t flags);

void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
					gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);

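/*
 * Call-sequence sketch (illustrative, not the slab allocator's exact
 * code) for the two hooks above:
 *
 *	object = kasan_slab_alloc(s, object, flags);	// unpoison (and tag)
 *	...
 *	if (kasan_slab_free(s, object, _RET_IP_))
 *		return;		// object went to quarantine; don't free it now
 *
 * A true return from kasan_slab_free() means KASAN took ownership of the
 * object, so the caller must not release it immediately.
 */
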
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
#else
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif

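/*
 * Sketch of the intended call site, simplified from a typical arch
 * module_alloc() (details vary by architecture):
 *
 *	void *p = __vmalloc_node_range(size, MODULE_ALIGN, ...);
 *	if (p && kasan_module_alloc(p, size) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 *	return p;
 */
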
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

size_t __ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr)
{
	kasan_unpoison_shadow(ptr, __ksize(ptr));
}
size_t kasan_metadata_size(struct kmem_cache *cache);

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

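/*
 * Usage sketch (illustrative): tests that deliberately trigger several
 * reports switch reporting from one-shot to multi-shot around the noisy
 * section and restore the previous mode afterwards:
 *
 *	bool multishot = kasan_save_enable_multi_shot();
 *	// ... code that may produce any number of KASAN reports ...
 *	kasan_restore_multi_shot(multishot);
 */
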
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

static inline void kasan_unpoison_task_stack(struct task_struct *task) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
				void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
				void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
{
	return ptr;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}

static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }

#endif /* CONFIG_KASAN */

#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SHADOW_INIT 0

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_SHADOW_INIT 0xFF

void kasan_init_tags(void);

void *kasan_reset_tag(const void *addr);

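/*
 * Illustrative sketch: with software tags the top byte of a pointer
 * carries its tag, so two pointers to the same object may compare
 * unequal. Strip the tags before comparing raw addresses:
 *
 *	if (kasan_reset_tag(a) == kasan_reset_tag(b))
 *		;	// same untagged address
 */
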
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS */

static inline void kasan_init_tags(void) { }

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_VMALLOC
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
#else
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}
#endif

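/*
 * Lifecycle sketch (illustrative pairing, simplified from the vmalloc
 * core): shadow is populated when an area is set up, poisoned when the
 * area is freed, and released once no area needs it any more:
 *
 *	kasan_populate_vmalloc(addr, size);	// back the area with real shadow
 *	...
 *	kasan_poison_vmalloc(start, size);	// on free: poison the range
 *	kasan_release_vmalloc(start, end, free_region_start, free_region_end);
 */
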
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */