1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * jump label support |
4 | * |
5 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> |
6 | * Copyright (C) 2011 Peter Zijlstra |
7 | * |
8 | */ |
9 | #include <linux/memory.h> |
10 | #include <linux/uaccess.h> |
11 | #include <linux/module.h> |
12 | #include <linux/list.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/sort.h> |
15 | #include <linux/err.h> |
16 | #include <linux/static_key.h> |
17 | #include <linux/jump_label_ratelimit.h> |
18 | #include <linux/bug.h> |
19 | #include <linux/cpu.h> |
20 | #include <asm/sections.h> |
21 | |
22 | /* mutex to protect coming/going of the jump_label table */ |
23 | static DEFINE_MUTEX(jump_label_mutex); |
24 | |
25 | void jump_label_lock(void) |
26 | { |
27 | mutex_lock(&jump_label_mutex); |
28 | } |
29 | |
30 | void jump_label_unlock(void) |
31 | { |
	mutex_unlock(&jump_label_mutex);
33 | } |
34 | |
35 | static int jump_label_cmp(const void *a, const void *b) |
36 | { |
37 | const struct jump_entry *jea = a; |
38 | const struct jump_entry *jeb = b; |
39 | |
40 | /* |
41 | * Entrires are sorted by key. |
42 | */ |
43 | if (jump_entry_key(entry: jea) < jump_entry_key(entry: jeb)) |
44 | return -1; |
45 | |
46 | if (jump_entry_key(entry: jea) > jump_entry_key(entry: jeb)) |
47 | return 1; |
48 | |
49 | /* |
50 | * In the batching mode, entries should also be sorted by the code |
51 | * inside the already sorted list of entries, enabling a bsearch in |
52 | * the vector. |
53 | */ |
54 | if (jump_entry_code(entry: jea) < jump_entry_code(entry: jeb)) |
55 | return -1; |
56 | |
57 | if (jump_entry_code(entry: jea) > jump_entry_code(entry: jeb)) |
58 | return 1; |
59 | |
60 | return 0; |
61 | } |
62 | |
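/*
 * With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, each jump_entry stores its
 * code/target/key fields as offsets relative to the field's own address,
 * so a plain byte-wise swap would corrupt them. Re-bias every field by
 * the distance the entry travels.
 */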
63 | static void jump_label_swap(void *a, void *b, int size) |
64 | { |
65 | long delta = (unsigned long)a - (unsigned long)b; |
66 | struct jump_entry *jea = a; |
67 | struct jump_entry *jeb = b; |
68 | struct jump_entry tmp = *jea; |
69 | |
70 | jea->code = jeb->code - delta; |
71 | jea->target = jeb->target - delta; |
72 | jea->key = jeb->key - delta; |
73 | |
74 | jeb->code = tmp.code + delta; |
75 | jeb->target = tmp.target + delta; |
76 | jeb->key = tmp.key + delta; |
77 | } |
78 | |
79 | static void |
80 | jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) |
81 | { |
82 | unsigned long size; |
83 | void *swapfn = NULL; |
84 | |
85 | if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE)) |
86 | swapfn = jump_label_swap; |
87 | |
88 | size = (((unsigned long)stop - (unsigned long)start) |
89 | / sizeof(struct jump_entry)); |
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
91 | } |
92 | |
93 | static void jump_label_update(struct static_key *key); |
94 | |
95 | /* |
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h. Since static_key_count() is not used in
 * branch statements, as it is for the !CONFIG_JUMP_LABEL case, it is OK to
 * have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
103 | */ |
104 | int static_key_count(struct static_key *key) |
105 | { |
106 | /* |
107 | * -1 means the first static_key_slow_inc() is in progress. |
108 | * static_key_enabled() must return true, so return 1 here. |
109 | */ |
	int n = atomic_read(&key->enabled);
111 | |
112 | return n >= 0 ? n : 1; |
113 | } |
114 | EXPORT_SYMBOL_GPL(static_key_count); |
115 | |
116 | /* |
117 | * static_key_fast_inc_not_disabled - adds a user for a static key |
118 | * @key: static key that must be already enabled |
119 | * |
120 | * The caller must make sure that the static key can't get disabled while |
121 | * in this function. It doesn't patch jump labels, only adds a user to |
122 | * an already enabled static key. |
123 | * |
 * Returns true if the increment was done. Unlike refcount_t, the ref counter
 * is not saturated, but will fail to increment on overflow.
126 | */ |
127 | bool static_key_fast_inc_not_disabled(struct static_key *key) |
128 | { |
129 | int v; |
130 | |
131 | STATIC_KEY_CHECK_USE(key); |
132 | /* |
133 | * Negative key->enabled has a special meaning: it sends |
134 | * static_key_slow_inc() down the slow path, and it is non-zero |
135 | * so it counts as "enabled" in jump_label_update(). Note that |
136 | * atomic_inc_unless_negative() checks >= 0, so roll our own. |
137 | */ |
	v = atomic_read(&key->enabled);
139 | do { |
140 | if (v <= 0 || (v + 1) < 0) |
141 | return false; |
142 | } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1))); |
143 | |
144 | return true; |
145 | } |
146 | EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled); |
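
/*
 * Illustrative sketch (not part of this file; 'my_key' and the error
 * handling are hypothetical): a caller that already holds its own
 * reference, and thus knows the key cannot be disabled, can take an
 * extra reference without going through the text-patching slow path.
 *
 *	if (!static_key_fast_inc_not_disabled(&my_key))
 *		return -EOVERFLOW;	// refcount would overflow
 */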
147 | |
148 | bool static_key_slow_inc_cpuslocked(struct static_key *key) |
149 | { |
150 | lockdep_assert_cpus_held(); |
151 | |
152 | /* |
153 | * Careful if we get concurrent static_key_slow_inc() calls; |
154 | * later calls must wait for the first one to _finish_ the |
155 | * jump_label_update() process. At the same time, however, |
156 | * the jump_label_update() call below wants to see |
157 | * static_key_enabled(&key) for jumps to be updated properly. |
158 | */ |
159 | if (static_key_fast_inc_not_disabled(key)) |
160 | return true; |
161 | |
162 | jump_label_lock(); |
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the cmpxchg loop in
		 * static_key_fast_inc_not_disabled() observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
171 | } else { |
172 | if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) { |
173 | jump_label_unlock(); |
174 | return false; |
175 | } |
176 | } |
177 | jump_label_unlock(); |
178 | return true; |
179 | } |
180 | |
181 | bool static_key_slow_inc(struct static_key *key) |
182 | { |
183 | bool ret; |
184 | |
185 | cpus_read_lock(); |
186 | ret = static_key_slow_inc_cpuslocked(key); |
187 | cpus_read_unlock(); |
188 | return ret; |
189 | } |
190 | EXPORT_SYMBOL_GPL(static_key_slow_inc); |
191 | |
192 | void static_key_enable_cpuslocked(struct static_key *key) |
193 | { |
194 | STATIC_KEY_CHECK_USE(key); |
195 | lockdep_assert_cpus_held(); |
196 | |
	if (atomic_read(&key->enabled) > 0) {
198 | WARN_ON_ONCE(atomic_read(&key->enabled) != 1); |
199 | return; |
200 | } |
201 | |
202 | jump_label_lock(); |
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
210 | } |
211 | jump_label_unlock(); |
212 | } |
213 | EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked); |
214 | |
215 | void static_key_enable(struct static_key *key) |
216 | { |
217 | cpus_read_lock(); |
218 | static_key_enable_cpuslocked(key); |
219 | cpus_read_unlock(); |
220 | } |
221 | EXPORT_SYMBOL_GPL(static_key_enable); |
222 | |
223 | void static_key_disable_cpuslocked(struct static_key *key) |
224 | { |
225 | STATIC_KEY_CHECK_USE(key); |
226 | lockdep_assert_cpus_held(); |
227 | |
	if (atomic_read(&key->enabled) != 1) {
229 | WARN_ON_ONCE(atomic_read(&key->enabled) != 0); |
230 | return; |
231 | } |
232 | |
233 | jump_label_lock(); |
	if (atomic_cmpxchg(&key->enabled, 1, 0))
235 | jump_label_update(key); |
236 | jump_label_unlock(); |
237 | } |
238 | EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked); |
239 | |
240 | void static_key_disable(struct static_key *key) |
241 | { |
242 | cpus_read_lock(); |
243 | static_key_disable_cpuslocked(key); |
244 | cpus_read_unlock(); |
245 | } |
246 | EXPORT_SYMBOL_GPL(static_key_disable); |
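
/*
 * Illustrative sketch of the boolean enable/disable interface above,
 * assuming the usual definitions from <linux/jump_label.h>; 'my_feature'
 * and do_feature_work() are hypothetical:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	// fast path: compiles to a NOP until the key is enabled
 *	if (static_branch_unlikely(&my_feature))
 *		do_feature_work();
 *
 *	// slow path: patches every branch site
 *	static_key_enable(&my_feature);
 *	static_key_disable(&my_feature);
 */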
247 | |
248 | static bool static_key_slow_try_dec(struct static_key *key) |
249 | { |
250 | int val; |
251 | |
	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
253 | if (val == 1) |
254 | return false; |
255 | |
256 | /* |
257 | * The negative count check is valid even when a negative |
258 | * key->enabled is in use by static_key_slow_inc(); a |
259 | * __static_key_slow_dec() before the first static_key_slow_inc() |
260 | * returns is unbalanced, because all other static_key_slow_inc() |
261 | * instances block while the update is in progress. |
262 | */ |
	WARN(val < 0, "jump label: negative count!\n");
264 | return true; |
265 | } |
266 | |
267 | static void __static_key_slow_dec_cpuslocked(struct static_key *key) |
268 | { |
269 | lockdep_assert_cpus_held(); |
270 | |
271 | if (static_key_slow_try_dec(key)) |
272 | return; |
273 | |
274 | jump_label_lock(); |
	if (atomic_dec_and_test(&key->enabled))
276 | jump_label_update(key); |
277 | jump_label_unlock(); |
278 | } |
279 | |
280 | static void __static_key_slow_dec(struct static_key *key) |
281 | { |
282 | cpus_read_lock(); |
283 | __static_key_slow_dec_cpuslocked(key); |
284 | cpus_read_unlock(); |
285 | } |
286 | |
287 | void jump_label_update_timeout(struct work_struct *work) |
288 | { |
289 | struct static_key_deferred *key = |
290 | container_of(work, struct static_key_deferred, work.work); |
	__static_key_slow_dec(&key->key);
292 | } |
293 | EXPORT_SYMBOL_GPL(jump_label_update_timeout); |
294 | |
295 | void static_key_slow_dec(struct static_key *key) |
296 | { |
297 | STATIC_KEY_CHECK_USE(key); |
298 | __static_key_slow_dec(key); |
299 | } |
300 | EXPORT_SYMBOL_GPL(static_key_slow_dec); |
301 | |
302 | void static_key_slow_dec_cpuslocked(struct static_key *key) |
303 | { |
304 | STATIC_KEY_CHECK_USE(key); |
305 | __static_key_slow_dec_cpuslocked(key); |
306 | } |
307 | |
308 | void __static_key_slow_dec_deferred(struct static_key *key, |
309 | struct delayed_work *work, |
310 | unsigned long timeout) |
311 | { |
312 | STATIC_KEY_CHECK_USE(key); |
313 | |
314 | if (static_key_slow_try_dec(key)) |
315 | return; |
316 | |
	schedule_delayed_work(work, timeout);
318 | } |
319 | EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred); |
320 | |
321 | void __static_key_deferred_flush(void *key, struct delayed_work *work) |
322 | { |
323 | STATIC_KEY_CHECK_USE(key); |
	flush_delayed_work(work);
325 | } |
326 | EXPORT_SYMBOL_GPL(__static_key_deferred_flush); |
327 | |
328 | void jump_label_rate_limit(struct static_key_deferred *key, |
329 | unsigned long rl) |
330 | { |
331 | STATIC_KEY_CHECK_USE(key); |
332 | key->timeout = rl; |
333 | INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); |
334 | } |
335 | EXPORT_SYMBOL_GPL(jump_label_rate_limit); |
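
/*
 * Illustrative sketch of the rate-limited variant (hypothetical user;
 * the helpers come from <linux/jump_label_ratelimit.h>): decrements are
 * deferred so that rapid inc/dec cycles don't thrash the text patcher.
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// once, at init
 *	static_key_slow_inc(&my_key.key);
 *	static_key_slow_dec_deferred(&my_key);	// patches at most once per HZ
 */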
336 | |
337 | static int addr_conflict(struct jump_entry *entry, void *start, void *end) |
338 | { |
339 | if (jump_entry_code(entry) <= (unsigned long)end && |
340 | jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start) |
341 | return 1; |
342 | |
343 | return 0; |
344 | } |
345 | |
346 | static int __jump_label_text_reserved(struct jump_entry *iter_start, |
347 | struct jump_entry *iter_stop, void *start, void *end, bool init) |
348 | { |
349 | struct jump_entry *iter; |
350 | |
351 | iter = iter_start; |
352 | while (iter < iter_stop) { |
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
355 | return 1; |
356 | } |
357 | iter++; |
358 | } |
359 | |
360 | return 0; |
361 | } |
362 | |
363 | #ifndef arch_jump_label_transform_static |
364 | static void arch_jump_label_transform_static(struct jump_entry *entry, |
365 | enum jump_label_type type) |
366 | { |
367 | /* nothing to do on most architectures */ |
368 | } |
369 | #endif |
370 | |
371 | static inline struct jump_entry *static_key_entries(struct static_key *key) |
372 | { |
373 | WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED); |
374 | return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK); |
375 | } |
376 | |
377 | static inline bool static_key_type(struct static_key *key) |
378 | { |
379 | return key->type & JUMP_TYPE_TRUE; |
380 | } |
381 | |
382 | static inline bool static_key_linked(struct static_key *key) |
383 | { |
384 | return key->type & JUMP_TYPE_LINKED; |
385 | } |
386 | |
387 | static inline void static_key_clear_linked(struct static_key *key) |
388 | { |
389 | key->type &= ~JUMP_TYPE_LINKED; |
390 | } |
391 | |
392 | static inline void static_key_set_linked(struct static_key *key) |
393 | { |
394 | key->type |= JUMP_TYPE_LINKED; |
395 | } |
396 | |
397 | /*** |
398 | * A 'struct static_key' uses a union such that it either points directly |
399 | * to a table of 'struct jump_entry' or to a linked list of modules which in |
400 | * turn point to 'struct jump_entry' tables. |
401 | * |
402 | * The two lower bits of the pointer are used to keep track of which pointer |
 * type is in use and to store the initial branch direction; the accessor
 * functions below preserve these bits.
405 | */ |
406 | static void static_key_set_entries(struct static_key *key, |
407 | struct jump_entry *entries) |
408 | { |
409 | unsigned long type; |
410 | |
411 | WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK); |
412 | type = key->type & JUMP_TYPE_MASK; |
413 | key->entries = entries; |
414 | key->type |= type; |
415 | } |
416 | |
417 | static enum jump_label_type jump_label_type(struct jump_entry *entry) |
418 | { |
419 | struct static_key *key = jump_entry_key(entry); |
420 | bool enabled = static_key_enabled(key); |
421 | bool branch = jump_entry_is_branch(entry); |
422 | |
423 | /* See the comment in linux/jump_label.h */ |
424 | return enabled ^ branch; |
425 | } |
426 | |
427 | static bool jump_label_can_update(struct jump_entry *entry, bool init) |
428 | { |
429 | /* |
430 | * Cannot update code that was in an init text area. |
431 | */ |
432 | if (!init && jump_entry_is_init(entry)) |
433 | return false; |
434 | |
	if (!kernel_text_address(jump_entry_code(entry))) {
436 | /* |
437 | * This skips patching built-in __exit, which |
438 | * is part of init_section_contains() but is |
439 | * not part of kernel_text_address(). |
440 | * |
441 | * Skipping built-in __exit is fine since it |
442 | * will never be executed. |
443 | */ |
444 | WARN_ONCE(!jump_entry_is_init(entry), |
445 | "can't patch jump_label at %pS" , |
446 | (void *)jump_entry_code(entry)); |
447 | return false; |
448 | } |
449 | |
450 | return true; |
451 | } |
452 | |
453 | #ifndef HAVE_JUMP_LABEL_BATCH |
454 | static void __jump_label_update(struct static_key *key, |
455 | struct jump_entry *entry, |
456 | struct jump_entry *stop, |
457 | bool init) |
458 | { |
459 | for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { |
460 | if (jump_label_can_update(entry, init)) |
461 | arch_jump_label_transform(entry, jump_label_type(entry)); |
462 | } |
463 | } |
464 | #else |
465 | static void __jump_label_update(struct static_key *key, |
466 | struct jump_entry *entry, |
467 | struct jump_entry *stop, |
468 | bool init) |
469 | { |
470 | for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { |
471 | |
472 | if (!jump_label_can_update(entry, init)) |
473 | continue; |
474 | |
		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
476 | /* |
477 | * Queue is full: Apply the current queue and try again. |
478 | */ |
479 | arch_jump_label_transform_apply(); |
480 | BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry))); |
481 | } |
482 | } |
483 | arch_jump_label_transform_apply(); |
484 | } |
485 | #endif |
486 | |
487 | void __init jump_label_init(void) |
488 | { |
489 | struct jump_entry *iter_start = __start___jump_table; |
490 | struct jump_entry *iter_stop = __stop___jump_table; |
491 | struct static_key *key = NULL; |
492 | struct jump_entry *iter; |
493 | |
494 | /* |
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
497 | * jump_label.h, let's make sure that is safe. There are only two |
498 | * cases to check since we initialize to 0 or 1. |
499 | */ |
500 | BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0); |
501 | BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1); |
502 | |
503 | if (static_key_initialized) |
504 | return; |
505 | |
506 | cpus_read_lock(); |
507 | jump_label_lock(); |
	jump_label_sort_entries(iter_start, iter_stop);
509 | |
510 | for (iter = iter_start; iter < iter_stop; iter++) { |
511 | struct static_key *iterk; |
512 | bool in_init; |
513 | |
514 | /* rewrite NOPs */ |
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
527 | } |
528 | static_key_initialized = true; |
529 | jump_label_unlock(); |
530 | cpus_read_unlock(); |
531 | } |
532 | |
533 | #ifdef CONFIG_MODULES |
534 | |
535 | enum jump_label_type jump_label_init_type(struct jump_entry *entry) |
536 | { |
537 | struct static_key *key = jump_entry_key(entry); |
538 | bool type = static_key_type(key); |
539 | bool branch = jump_entry_is_branch(entry); |
540 | |
541 | /* See the comment in linux/jump_label.h */ |
542 | return type ^ branch; |
543 | } |
544 | |
545 | struct static_key_mod { |
546 | struct static_key_mod *next; |
547 | struct jump_entry *entries; |
548 | struct module *mod; |
549 | }; |
550 | |
551 | static inline struct static_key_mod *static_key_mod(struct static_key *key) |
552 | { |
553 | WARN_ON_ONCE(!static_key_linked(key)); |
554 | return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); |
555 | } |
556 | |
557 | /*** |
558 | * key->type and key->next are the same via union. |
559 | * This sets key->next and preserves the type bits. |
560 | * |
561 | * See additional comments above static_key_set_entries(). |
562 | */ |
563 | static void static_key_set_mod(struct static_key *key, |
564 | struct static_key_mod *mod) |
565 | { |
566 | unsigned long type; |
567 | |
568 | WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK); |
569 | type = key->type & JUMP_TYPE_MASK; |
570 | key->next = mod; |
571 | key->type |= type; |
572 | } |
573 | |
574 | static int __jump_label_mod_text_reserved(void *start, void *end) |
575 | { |
576 | struct module *mod; |
577 | int ret; |
578 | |
579 | preempt_disable(); |
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);
594 | |
595 | return ret; |
596 | } |
597 | |
598 | static void __jump_label_mod_update(struct static_key *key) |
599 | { |
600 | struct static_key_mod *mod; |
601 | |
602 | for (mod = static_key_mod(key); mod; mod = mod->next) { |
603 | struct jump_entry *stop; |
604 | struct module *m; |
605 | |
606 | /* |
607 | * NULL if the static_key is defined in a module |
608 | * that does not use it |
609 | */ |
610 | if (!mod->entries) |
611 | continue; |
612 | |
613 | m = mod->mod; |
614 | if (!m) |
615 | stop = __stop___jump_table; |
616 | else |
617 | stop = m->jump_entries + m->num_jump_entries; |
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
620 | } |
621 | } |
622 | |
623 | static int jump_label_add_module(struct module *mod) |
624 | { |
625 | struct jump_entry *iter_start = mod->jump_entries; |
626 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
627 | struct jump_entry *iter; |
628 | struct static_key *key = NULL; |
629 | struct static_key_mod *jlm, *jlm2; |
630 | |
631 | /* if the module doesn't have jump label entries, just return */ |
632 | if (iter_start == iter_stop) |
633 | return 0; |
634 | |
	jump_label_sort_entries(iter_start, iter_stop);
636 | |
637 | for (iter = iter_start; iter < iter_stop; iter++) { |
638 | struct static_key *iterk; |
639 | bool in_init; |
640 | |
		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
680 | } |
681 | |
682 | return 0; |
683 | } |
684 | |
685 | static void jump_label_del_module(struct module *mod) |
686 | { |
687 | struct jump_entry *iter_start = mod->jump_entries; |
688 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
689 | struct jump_entry *iter; |
690 | struct static_key *key = NULL; |
691 | struct static_key_mod *jlm, **prev; |
692 | |
	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
732 | } |
733 | } |
734 | |
735 | static int |
736 | jump_label_module_notify(struct notifier_block *self, unsigned long val, |
737 | void *data) |
738 | { |
739 | struct module *mod = data; |
740 | int ret = 0; |
741 | |
742 | cpus_read_lock(); |
743 | jump_label_lock(); |
744 | |
745 | switch (val) { |
746 | case MODULE_STATE_COMING: |
747 | ret = jump_label_add_module(mod); |
748 | if (ret) { |
749 | WARN(1, "Failed to allocate memory: jump_label may not work properly.\n" ); |
750 | jump_label_del_module(mod); |
751 | } |
752 | break; |
753 | case MODULE_STATE_GOING: |
754 | jump_label_del_module(mod); |
755 | break; |
756 | } |
757 | |
758 | jump_label_unlock(); |
759 | cpus_read_unlock(); |
760 | |
	return notifier_from_errno(ret);
762 | } |
763 | |
764 | static struct notifier_block jump_label_module_nb = { |
765 | .notifier_call = jump_label_module_notify, |
766 | .priority = 1, /* higher than tracepoints */ |
767 | }; |
768 | |
769 | static __init int jump_label_init_module(void) |
770 | { |
	return register_module_notifier(&jump_label_module_nb);
772 | } |
773 | early_initcall(jump_label_init_module); |
774 | |
775 | #endif /* CONFIG_MODULES */ |
776 | |
777 | /*** |
778 | * jump_label_text_reserved - check if addr range is reserved |
779 | * @start: start text addr |
780 | * @end: end text addr |
781 | * |
 * Checks whether the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
786 | * Caller must hold jump_label_mutex. |
787 | * |
788 | * returns 1 if there is an overlap, 0 otherwise |
789 | */ |
790 | int jump_label_text_reserved(void *start, void *end) |
791 | { |
792 | bool init = system_state < SYSTEM_RUNNING; |
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);
795 | |
796 | if (ret) |
797 | return ret; |
798 | |
799 | #ifdef CONFIG_MODULES |
800 | ret = __jump_label_mod_text_reserved(start, end); |
801 | #endif |
802 | return ret; |
803 | } |
804 | |
805 | static void jump_label_update(struct static_key *key) |
806 | { |
807 | struct jump_entry *stop = __stop___jump_table; |
808 | bool init = system_state < SYSTEM_RUNNING; |
809 | struct jump_entry *entry; |
810 | #ifdef CONFIG_MODULES |
811 | struct module *mod; |
812 | |
813 | if (static_key_linked(key)) { |
814 | __jump_label_mod_update(key); |
815 | return; |
816 | } |
817 | |
818 | preempt_disable(); |
	mod = __module_address((unsigned long)key);
820 | if (mod) { |
821 | stop = mod->jump_entries + mod->num_jump_entries; |
822 | init = mod->state == MODULE_STATE_COMING; |
823 | } |
824 | preempt_enable(); |
825 | #endif |
826 | entry = static_key_entries(key); |
827 | /* if there are no users, entry can be NULL */ |
828 | if (entry) |
829 | __jump_label_update(key, entry, stop, init); |
830 | } |
831 | |
832 | #ifdef CONFIG_STATIC_KEYS_SELFTEST |
833 | static DEFINE_STATIC_KEY_TRUE(sk_true); |
834 | static DEFINE_STATIC_KEY_FALSE(sk_false); |
835 | |
836 | static __init int jump_label_test(void) |
837 | { |
838 | int i; |
839 | |
840 | for (i = 0; i < 2; i++) { |
841 | WARN_ON(static_key_enabled(&sk_true.key) != true); |
842 | WARN_ON(static_key_enabled(&sk_false.key) != false); |
843 | |
844 | WARN_ON(!static_branch_likely(&sk_true)); |
845 | WARN_ON(!static_branch_unlikely(&sk_true)); |
846 | WARN_ON(static_branch_likely(&sk_false)); |
847 | WARN_ON(static_branch_unlikely(&sk_false)); |
848 | |
849 | static_branch_disable(&sk_true); |
850 | static_branch_enable(&sk_false); |
851 | |
852 | WARN_ON(static_key_enabled(&sk_true.key) == true); |
853 | WARN_ON(static_key_enabled(&sk_false.key) == false); |
854 | |
855 | WARN_ON(static_branch_likely(&sk_true)); |
856 | WARN_ON(static_branch_unlikely(&sk_true)); |
857 | WARN_ON(!static_branch_likely(&sk_false)); |
858 | WARN_ON(!static_branch_unlikely(&sk_false)); |
859 | |
860 | static_branch_enable(&sk_true); |
861 | static_branch_disable(&sk_false); |
862 | } |
863 | |
864 | return 0; |
865 | } |
866 | early_initcall(jump_label_test); |
867 | #endif /* STATIC_KEYS_SELFTEST */ |
868 | |