// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list that also tracks the total size of
 * the objects it holds. Per-cpu queues additionally use 'offline' to mark
 * that their CPU has been taken down.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

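/*
 * Append @qlink to the tail of @q in O(1) and account its size.
 * Callers provide their own synchronization: interrupts are disabled for
 * the per-cpu queues, and quarantine_lock is held for the global batches.
 */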
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

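/* Splice all nodes from @from onto the tail of @to and reinitialize @from. */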
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

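/*
 * Worked example: QUARANTINE_PERCPU_SIZE is 1 MiB, and with, say,
 * CONFIG_NR_CPUS=64 QUARANTINE_BATCHES evaluates to max(1024, 256) = 1024
 * global batches. A per-cpu queue is drained into the global queue once it
 * grows past 1 MiB; see kasan_quarantine_put() below.
 */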
/*
 * The object quarantine consists of per-cpu queues, protected by disabling
 * interrupts, and a global queue guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

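/*
 * Per-cpu scratch lists used by kasan_quarantine_remove_cache() to collect
 * a dying cache's objects as they are moved off the per-cpu queues.
 */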
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

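/* Map a quarantine link back to the cache its object came from, via the slab. */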
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

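/*
 * The quarantine link lives inside struct kasan_free_meta, which sits
 * free_meta_offset bytes into the object (inside the object itself when the
 * offset is 0); step back by that offset to recover the object's address.
 */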
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

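/* Release one quarantined object back to its cache. */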
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(meta, sizeof(*meta));

	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

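/*
 * Free every object on @q. If @cache is NULL, each object's cache is looked
 * up individually; otherwise all objects are assumed to belong to @cache.
 */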
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

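/*
 * Defer freeing of @object by placing it on this CPU's quarantine queue.
 * Returns false if the object cannot be quarantined (it has no free
 * metadata, or this CPU's queue is offline), in which case the caller
 * must free it immediately.
 */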
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

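/*
 * Shrink the global quarantine when it exceeds its allowed footprint,
 * freeing the oldest batch. The limit is recomputed on every call so that
 * it tracks memory hotplug: for example, on an assumed 16 GiB machine,
 * total_size below comes to 16 GiB / 32 = 512 MiB.
 */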
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

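/*
 * Move only the objects belonging to @cache from @from to @to, re-queueing
 * all other objects back onto @from in their original order.
 */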
static void qlist_move_cache(struct qlist_head *from,
			     struct qlist_head *to,
			     struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

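/*
 * Runs on every CPU via on_each_cpu(): drain this CPU's quarantine of
 * objects belonging to the cache passed in @arg into its shrink_qlist.
 */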
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}

static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure ordering between the write to q->offline (in
	 * kasan_cpu_offline()) and the work below, so that an offlined
	 * cpu_quarantine is not corrupted by an interrupt.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;
	int cpu;
	struct cpu_shrink_qlist *sq;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	for_each_online_cpu(cpu) {
		sq = per_cpu_ptr(&shrink_qlist, cpu);
		raw_spin_lock_irqsave(&sq->lock, flags);
		qlist_move_cache(&sq->qlist, &to_free, cache);
		raw_spin_unlock_irqrestore(&sq->lock, flags);
	}
	qlist_free_all(&to_free, cache);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

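/*
 * CPU hotplug callbacks: when a CPU goes down its quarantine queue is
 * drained and marked offline; onlining marks it usable again.
 */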
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the writing to q->offline and
	 * qlist_free_all. Otherwise, cpu_quarantine may be corrupted
	 * by interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

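/*
 * Register the hotplug callbacks. This runs as a late initcall, by which
 * point the slab allocator and KASAN are fully initialized.
 */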
static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);