1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Kernel Probes (KProbes) |
4 | * |
5 | * Copyright (C) IBM Corporation, 2002, 2004 |
6 | * |
7 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel |
8 | * Probes initial implementation (includes suggestions from |
9 | * Rusty Russell). |
10 | * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with |
11 | * hlists and exceptions notifier as suggested by Andi Kleen. |
12 | * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes |
13 | * interface to access function arguments. |
14 | * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes |
15 | * exceptions notifier to be first on the priority list. |
16 | * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston |
17 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi |
18 | * <prasanna@in.ibm.com> added function-return probes. |
19 | */ |
20 | |
21 | #define pr_fmt(fmt) "kprobes: " fmt |
22 | |
23 | #include <linux/kprobes.h> |
24 | #include <linux/hash.h> |
25 | #include <linux/init.h> |
26 | #include <linux/slab.h> |
27 | #include <linux/stddef.h> |
28 | #include <linux/export.h> |
29 | #include <linux/moduleloader.h> |
30 | #include <linux/kallsyms.h> |
31 | #include <linux/freezer.h> |
32 | #include <linux/seq_file.h> |
33 | #include <linux/debugfs.h> |
34 | #include <linux/sysctl.h> |
35 | #include <linux/kdebug.h> |
36 | #include <linux/memory.h> |
37 | #include <linux/ftrace.h> |
38 | #include <linux/cpu.h> |
39 | #include <linux/jump_label.h> |
40 | #include <linux/static_call.h> |
41 | #include <linux/perf_event.h> |
42 | |
43 | #include <asm/sections.h> |
44 | #include <asm/cacheflush.h> |
45 | #include <asm/errno.h> |
46 | #include <linux/uaccess.h> |
47 | |
48 | #define KPROBE_HASH_BITS 6 |
49 | #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) |
50 | |
51 | #if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL) |
52 | #define kprobe_sysctls_init() do { } while (0) |
53 | #endif |
54 | |
55 | static int kprobes_initialized; |
56 | /* kprobe_table can be accessed by |
57 | * - Normal hlist traversal and RCU add/del under 'kprobe_mutex' is held. |
58 | * Or |
59 | * - RCU hlist traversal under disabling preempt (breakpoint handlers) |
60 | */ |
61 | static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; |
62 | |
63 | /* NOTE: change this value only with 'kprobe_mutex' held */ |
64 | static bool kprobes_all_disarmed; |
65 | |
66 | /* This protects 'kprobe_table' and 'optimizing_list' */ |
67 | static DEFINE_MUTEX(kprobe_mutex); |
68 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance); |
69 | |
70 | kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, |
71 | unsigned int __unused) |
72 | { |
73 | return ((kprobe_opcode_t *)(kallsyms_lookup_name(name))); |
74 | } |
75 | |
76 | /* |
77 | * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where |
78 | * kprobes can not probe. |
79 | */ |
80 | static LIST_HEAD(kprobe_blacklist); |
81 | |
82 | #ifdef __ARCH_WANT_KPROBES_INSN_SLOT |
83 | /* |
84 | * 'kprobe::ainsn.insn' points to the copy of the instruction to be |
85 | * single-stepped. x86_64, POWER4 and above have no-exec support and |
86 | * stepping on the instruction on a vmalloced/kmalloced/data page |
87 | * is a recipe for disaster |
88 | */ |
89 | struct kprobe_insn_page { |
90 | struct list_head list; |
91 | kprobe_opcode_t *insns; /* Page of instruction slots */ |
92 | struct kprobe_insn_cache *cache; |
93 | int nused; |
94 | int ngarbage; |
95 | char slot_used[]; |
96 | }; |
97 | |
98 | #define KPROBE_INSN_PAGE_SIZE(slots) \ |
99 | (offsetof(struct kprobe_insn_page, slot_used) + \ |
100 | (sizeof(char) * (slots))) |
101 | |
102 | static int slots_per_page(struct kprobe_insn_cache *c) |
103 | { |
104 | return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); |
105 | } |
106 | |
107 | enum kprobe_slot_state { |
108 | SLOT_CLEAN = 0, |
109 | SLOT_DIRTY = 1, |
110 | SLOT_USED = 2, |
111 | }; |
112 | |
113 | void __weak *alloc_insn_page(void) |
114 | { |
115 | /* |
116 | * Use module_alloc() so this page is within +/- 2GB of where the |
117 | * kernel image and loaded module images reside. This is required |
118 | * for most of the architectures. |
119 | * (e.g. x86-64 needs this to handle the %rip-relative fixups.) |
120 | */ |
121 | return module_alloc(PAGE_SIZE); |
122 | } |
123 | |
124 | static void free_insn_page(void *page) |
125 | { |
126 | module_memfree(page); |
127 | } |
128 | |
129 | struct kprobe_insn_cache kprobe_insn_slots = { |
130 | .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), |
131 | .alloc = alloc_insn_page, |
132 | .free = free_insn_page, |
133 | .sym = KPROBE_INSN_PAGE_SYM, |
134 | .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), |
135 | .insn_size = MAX_INSN_SIZE, |
136 | .nr_garbage = 0, |
137 | }; |
138 | static int collect_garbage_slots(struct kprobe_insn_cache *c); |
139 | |
140 | /** |
141 | * __get_insn_slot() - Find a slot on an executable page for an instruction. |
142 | * We allocate an executable page if there's no room on existing ones. |
143 | */ |
144 | kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) |
145 | { |
146 | struct kprobe_insn_page *kip; |
147 | kprobe_opcode_t *slot = NULL; |
148 | |
149 | /* Since the slot array is not protected by rcu, we need a mutex */ |
150 | mutex_lock(&c->mutex); |
151 | retry: |
152 | rcu_read_lock(); |
153 | list_for_each_entry_rcu(kip, &c->pages, list) { |
154 | if (kip->nused < slots_per_page(c)) { |
155 | int i; |
156 | |
157 | for (i = 0; i < slots_per_page(c); i++) { |
158 | if (kip->slot_used[i] == SLOT_CLEAN) { |
159 | kip->slot_used[i] = SLOT_USED; |
160 | kip->nused++; |
161 | slot = kip->insns + (i * c->insn_size); |
162 | rcu_read_unlock(); |
163 | goto out; |
164 | } |
165 | } |
166 | /* kip->nused is broken. Fix it. */ |
167 | kip->nused = slots_per_page(c); |
168 | WARN_ON(1); |
169 | } |
170 | } |
171 | rcu_read_unlock(); |
172 | |
173 | /* If there are any garbage slots, collect them and try again. */ |
174 | if (c->nr_garbage && collect_garbage_slots(c) == 0) |
175 | goto retry; |
176 | |
177 | /* All out of space. Need to allocate a new page. */ |
178 | kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); |
179 | if (!kip) |
180 | goto out; |
181 | |
182 | kip->insns = c->alloc(); |
183 | if (!kip->insns) { |
184 | kfree(kip); |
185 | goto out; |
186 | } |
187 | INIT_LIST_HEAD(&kip->list); |
188 | memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); |
189 | kip->slot_used[0] = SLOT_USED; |
190 | kip->nused = 1; |
191 | kip->ngarbage = 0; |
192 | kip->cache = c; |
193 | list_add_rcu(&kip->list, &c->pages); |
194 | slot = kip->insns; |
195 | |
196 | /* Record the perf ksymbol register event after adding the page */ |
197 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, |
198 | PAGE_SIZE, false, c->sym); |
199 | out: |
200 | mutex_unlock(&c->mutex); |
201 | return slot; |
202 | } |
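/*
 * Typical arch-side usage sketch (illustrative only, not taken from any one
 * architecture): ports normally go through the get_insn_slot()/free_insn_slot()
 * wrappers from <linux/kprobes.h> rather than calling __get_insn_slot()
 * directly, e.g. in arch_prepare_kprobe():
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *
 * and later free_insn_slot(p->ainsn.insn, dirty) from arch_remove_kprobe().
 */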
203 | |
204 | /* Return true if all garbage slots on the page are collected (the page is empty), otherwise false. */ |
205 | static bool collect_one_slot(struct kprobe_insn_page *kip, int idx) |
206 | { |
207 | kip->slot_used[idx] = SLOT_CLEAN; |
208 | kip->nused--; |
209 | if (kip->nused == 0) { |
210 | /* |
211 | * Page is no longer in use. Free it unless |
212 | * it's the last one. We keep the last one |
213 | * so as not to have to set it up again the |
214 | * next time somebody inserts a probe. |
215 | */ |
216 | if (!list_is_singular(&kip->list)) { |
217 | /* |
218 | * Record perf ksymbol unregister event before removing |
219 | * the page. |
220 | */ |
221 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
222 | (unsigned long)kip->insns, PAGE_SIZE, true, |
223 | kip->cache->sym); |
224 | list_del_rcu(&kip->list); |
225 | synchronize_rcu(); |
226 | kip->cache->free(kip->insns); |
227 | kfree(kip); |
228 | } |
229 | return true; |
230 | } |
231 | return false; |
232 | } |
233 | |
234 | static int collect_garbage_slots(struct kprobe_insn_cache *c) |
235 | { |
236 | struct kprobe_insn_page *kip, *next; |
237 | |
238 | /* Ensure no-one is still running on the garbage slots */ |
239 | synchronize_rcu(); |
240 | |
241 | list_for_each_entry_safe(kip, next, &c->pages, list) { |
242 | int i; |
243 | |
244 | if (kip->ngarbage == 0) |
245 | continue; |
246 | kip->ngarbage = 0; /* we will collect all garbage slots */ |
247 | for (i = 0; i < slots_per_page(c); i++) { |
248 | if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) |
249 | break; |
250 | } |
251 | } |
252 | c->nr_garbage = 0; |
253 | return 0; |
254 | } |
255 | |
256 | void __free_insn_slot(struct kprobe_insn_cache *c, |
257 | kprobe_opcode_t *slot, int dirty) |
258 | { |
259 | struct kprobe_insn_page *kip; |
260 | long idx; |
261 | |
262 | mutex_lock(&c->mutex); |
263 | rcu_read_lock(); |
264 | list_for_each_entry_rcu(kip, &c->pages, list) { |
265 | idx = ((long)slot - (long)kip->insns) / |
266 | (c->insn_size * sizeof(kprobe_opcode_t)); |
267 | if (idx >= 0 && idx < slots_per_page(c)) |
268 | goto out; |
269 | } |
270 | /* Could not find this slot. */ |
271 | WARN_ON(1); |
272 | kip = NULL; |
273 | out: |
274 | rcu_read_unlock(); |
275 | /* Mark and sweep: this may sleep */ |
276 | if (kip) { |
277 | /* Check double free */ |
278 | WARN_ON(kip->slot_used[idx] != SLOT_USED); |
279 | if (dirty) { |
280 | kip->slot_used[idx] = SLOT_DIRTY; |
281 | kip->ngarbage++; |
282 | if (++c->nr_garbage > slots_per_page(c)) |
283 | collect_garbage_slots(c); |
284 | } else { |
285 | collect_one_slot(kip, idx); |
286 | } |
287 | } |
288 | mutex_unlock(&c->mutex); |
289 | } |
290 | |
291 | /* |
292 | * Check given address is on the page of kprobe instruction slots. |
293 | * This will be used for checking whether the address on a stack |
294 | * is on a text area or not. |
295 | */ |
296 | bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) |
297 | { |
298 | struct kprobe_insn_page *kip; |
299 | bool ret = false; |
300 | |
301 | rcu_read_lock(); |
302 | list_for_each_entry_rcu(kip, &c->pages, list) { |
303 | if (addr >= (unsigned long)kip->insns && |
304 | addr < (unsigned long)kip->insns + PAGE_SIZE) { |
305 | ret = true; |
306 | break; |
307 | } |
308 | } |
309 | rcu_read_unlock(); |
310 | |
311 | return ret; |
312 | } |
313 | |
314 | int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, |
315 | unsigned long *value, char *type, char *sym) |
316 | { |
317 | struct kprobe_insn_page *kip; |
318 | int ret = -ERANGE; |
319 | |
320 | rcu_read_lock(); |
321 | list_for_each_entry_rcu(kip, &c->pages, list) { |
322 | if ((*symnum)--) |
323 | continue; |
324 | strscpy(sym, c->sym, KSYM_NAME_LEN); |
325 | *type = 't'; |
326 | *value = (unsigned long)kip->insns; |
327 | ret = 0; |
328 | break; |
329 | } |
330 | rcu_read_unlock(); |
331 | |
332 | return ret; |
333 | } |
334 | |
335 | #ifdef CONFIG_OPTPROBES |
336 | void __weak *alloc_optinsn_page(void) |
337 | { |
338 | return alloc_insn_page(); |
339 | } |
340 | |
341 | void __weak free_optinsn_page(void *page) |
342 | { |
343 | free_insn_page(page); |
344 | } |
345 | |
346 | /* For optimized_kprobe buffer */ |
347 | struct kprobe_insn_cache kprobe_optinsn_slots = { |
348 | .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), |
349 | .alloc = alloc_optinsn_page, |
350 | .free = free_optinsn_page, |
351 | .sym = KPROBE_OPTINSN_PAGE_SYM, |
352 | .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), |
353 | /* .insn_size is initialized later */ |
354 | .nr_garbage = 0, |
355 | }; |
356 | #endif |
357 | #endif |
358 | |
359 | /* We have preemption disabled, so it is safe to use the '__' versions. */ |
360 | static inline void set_kprobe_instance(struct kprobe *kp) |
361 | { |
362 | __this_cpu_write(kprobe_instance, kp); |
363 | } |
364 | |
365 | static inline void reset_kprobe_instance(void) |
366 | { |
367 | __this_cpu_write(kprobe_instance, NULL); |
368 | } |
369 | |
370 | /* |
371 | * This routine is called either: |
372 | * - under the 'kprobe_mutex' - during kprobe_[un]register(). |
373 | * OR |
374 | * - with preemption disabled - from architecture specific code. |
375 | */ |
376 | struct kprobe *get_kprobe(void *addr) |
377 | { |
378 | struct hlist_head *head; |
379 | struct kprobe *p; |
380 | |
381 | head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; |
382 | hlist_for_each_entry_rcu(p, head, hlist, |
383 | lockdep_is_held(&kprobe_mutex)) { |
384 | if (p->addr == addr) |
385 | return p; |
386 | } |
387 | |
388 | return NULL; |
389 | } |
390 | NOKPROBE_SYMBOL(get_kprobe); |
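/*
 * Lookup sketch (hypothetical caller, for illustration only): with
 * 'kprobe_mutex' held, a registered probe can be found by its address:
 *
 *	mutex_lock(&kprobe_mutex);
 *	p = get_kprobe(some_text_addr);
 *	if (p)
 *		pr_info("probe registered at %pS\n", p->addr);
 *	mutex_unlock(&kprobe_mutex);
 *
 * Breakpoint handlers instead rely on the "preemption disabled" rule above.
 */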
391 | |
392 | static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); |
393 | |
394 | /* Return true if 'p' is an aggregator */ |
395 | static inline bool kprobe_aggrprobe(struct kprobe *p) |
396 | { |
397 | return p->pre_handler == aggr_pre_handler; |
398 | } |
399 | |
400 | /* Return true if 'p' is unused */ |
401 | static inline bool kprobe_unused(struct kprobe *p) |
402 | { |
403 | return kprobe_aggrprobe(p) && kprobe_disabled(p) && |
404 | list_empty(&p->list); |
405 | } |
406 | |
407 | /* Keep all fields in the kprobe consistent. */ |
408 | static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) |
409 | { |
410 | memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); |
411 | memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); |
412 | } |
413 | |
414 | #ifdef CONFIG_OPTPROBES |
415 | /* NOTE: This is protected by 'kprobe_mutex'. */ |
416 | static bool kprobes_allow_optimization; |
417 | |
418 | /* |
419 | * Call all 'kprobe::pre_handler' handlers on the list, but ignore their return values. |
420 | * This must be called from arch-dep optimized caller. |
421 | */ |
422 | void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) |
423 | { |
424 | struct kprobe *kp; |
425 | |
426 | list_for_each_entry_rcu(kp, &p->list, list) { |
427 | if (kp->pre_handler && likely(!kprobe_disabled(kp))) { |
428 | set_kprobe_instance(kp); |
429 | kp->pre_handler(kp, regs); |
430 | } |
431 | reset_kprobe_instance(); |
432 | } |
433 | } |
434 | NOKPROBE_SYMBOL(opt_pre_handler); |
435 | |
436 | /* Free optimized instructions and optimized_kprobe */ |
437 | static void free_aggr_kprobe(struct kprobe *p) |
438 | { |
439 | struct optimized_kprobe *op; |
440 | |
441 | op = container_of(p, struct optimized_kprobe, kp); |
442 | arch_remove_optimized_kprobe(op); |
443 | arch_remove_kprobe(p); |
444 | kfree(op); |
445 | } |
446 | |
447 | /* Return true if the kprobe is ready for optimization. */ |
448 | static inline int kprobe_optready(struct kprobe *p) |
449 | { |
450 | struct optimized_kprobe *op; |
451 | |
452 | if (kprobe_aggrprobe(p)) { |
453 | op = container_of(p, struct optimized_kprobe, kp); |
454 | return arch_prepared_optinsn(&op->optinsn); |
455 | } |
456 | |
457 | return 0; |
458 | } |
459 | |
460 | /* Return true if the kprobe is disarmed. Note: p must be on hash list */ |
461 | bool kprobe_disarmed(struct kprobe *p) |
462 | { |
463 | struct optimized_kprobe *op; |
464 | |
465 | /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ |
466 | if (!kprobe_aggrprobe(p)) |
467 | return kprobe_disabled(p); |
468 | |
469 | op = container_of(p, struct optimized_kprobe, kp); |
470 | |
471 | return kprobe_disabled(p) && list_empty(&op->list); |
472 | } |
473 | |
474 | /* Return true if the probe is queued on (un)optimizing lists */ |
475 | static bool kprobe_queued(struct kprobe *p) |
476 | { |
477 | struct optimized_kprobe *op; |
478 | |
479 | if (kprobe_aggrprobe(p)) { |
480 | op = container_of(p, struct optimized_kprobe, kp); |
481 | if (!list_empty(&op->list)) |
482 | return true; |
483 | } |
484 | return false; |
485 | } |
486 | |
487 | /* |
488 | * Return an optimized kprobe whose optimizing code replaces |
489 | * instructions including 'addr' (excluding the breakpoint instruction itself). |
490 | */ |
491 | static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr) |
492 | { |
493 | int i; |
494 | struct kprobe *p = NULL; |
495 | struct optimized_kprobe *op; |
496 | |
497 | /* Don't check i == 0, since that is a breakpoint case. */ |
498 | for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++) |
499 | p = get_kprobe(addr - i); |
500 | |
501 | if (p && kprobe_optready(p)) { |
502 | op = container_of(p, struct optimized_kprobe, kp); |
503 | if (arch_within_optimized_kprobe(op, addr)) |
504 | return p; |
505 | } |
506 | |
507 | return NULL; |
508 | } |
509 | |
510 | /* Optimization staging list, protected by 'kprobe_mutex' */ |
511 | static LIST_HEAD(optimizing_list); |
512 | static LIST_HEAD(unoptimizing_list); |
513 | static LIST_HEAD(freeing_list); |
514 | |
515 | static void kprobe_optimizer(struct work_struct *work); |
516 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); |
517 | #define OPTIMIZE_DELAY 5 |
518 | |
519 | /* |
520 | * Optimize (replace a breakpoint with a jump) kprobes listed on |
521 | * 'optimizing_list'. |
522 | */ |
523 | static void do_optimize_kprobes(void) |
524 | { |
525 | lockdep_assert_held(&text_mutex); |
526 | /* |
527 | * The optimization/unoptimization refers to 'online_cpus' via |
528 | * stop_machine(), while cpu-hotplug modifies 'online_cpus'. |
529 | * At the same time, 'text_mutex' is held both by cpu-hotplug and here. |
530 | * This combination can cause a deadlock (cpu-hotplug tries to lock |
531 | * 'text_mutex' but stop_machine() can not be done because |
532 | * 'online_cpus' has been changed). |
533 | * To avoid this deadlock, the caller must have locked cpu-hotplug, |
534 | * preventing cpu-hotplug from running outside of 'text_mutex' locking. |
535 | */ |
536 | lockdep_assert_cpus_held(); |
537 | |
538 | /* Optimization is never done while kprobes are disarmed. */ |
539 | if (kprobes_all_disarmed || !kprobes_allow_optimization || |
540 | list_empty(&optimizing_list)) |
541 | return; |
542 | |
543 | arch_optimize_kprobes(&optimizing_list); |
544 | } |
545 | |
546 | /* |
547 | * Unoptimize (replace a jump with a breakpoint and remove the breakpoint |
548 | * if needed) kprobes listed on 'unoptimizing_list'. |
549 | */ |
550 | static void do_unoptimize_kprobes(void) |
551 | { |
552 | struct optimized_kprobe *op, *tmp; |
553 | |
554 | lockdep_assert_held(&text_mutex); |
555 | /* See comment in do_optimize_kprobes() */ |
556 | lockdep_assert_cpus_held(); |
557 | |
558 | if (!list_empty(&unoptimizing_list)) |
559 | arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); |
560 | |
561 | /* Loop on 'freeing_list' for disarming and removing from kprobe hash list */ |
562 | list_for_each_entry_safe(op, tmp, &freeing_list, list) { |
563 | /* Switching from detour code to origin */ |
564 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
565 | /* Disarm probes if marked disabled and not gone */ |
566 | if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp)) |
567 | arch_disarm_kprobe(&op->kp); |
568 | if (kprobe_unused(&op->kp)) { |
569 | /* |
570 | * Remove unused probes from hash list. After waiting |
571 | * for synchronization, these probes are reclaimed. |
572 | * (reclaiming is done by do_free_cleaned_kprobes().) |
573 | */ |
574 | hlist_del_rcu(&op->kp.hlist); |
575 | } else |
576 | list_del_init(&op->list); |
577 | } |
578 | } |
579 | |
580 | /* Reclaim all kprobes on the 'freeing_list' */ |
581 | static void do_free_cleaned_kprobes(void) |
582 | { |
583 | struct optimized_kprobe *op, *tmp; |
584 | |
585 | list_for_each_entry_safe(op, tmp, &freeing_list, list) { |
586 | list_del_init(&op->list); |
587 | if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { |
588 | /* |
589 | * This must not happen, but if there is a kprobe |
590 | * still in use, keep it on kprobes hash list. |
591 | */ |
592 | continue; |
593 | } |
594 | free_aggr_kprobe(&op->kp); |
595 | } |
596 | } |
597 | |
598 | /* Start optimizer after OPTIMIZE_DELAY passed */ |
599 | static void kick_kprobe_optimizer(void) |
600 | { |
601 | schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); |
602 | } |
603 | |
604 | /* Kprobe jump optimizer */ |
605 | static void kprobe_optimizer(struct work_struct *work) |
606 | { |
607 | mutex_lock(&kprobe_mutex); |
608 | cpus_read_lock(); |
609 | mutex_lock(&text_mutex); |
610 | |
611 | /* |
612 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) |
613 | * kprobes before waiting for the quiescence period. |
614 | */ |
615 | do_unoptimize_kprobes(); |
616 | |
617 | /* |
618 | * Step 2: Wait for the quiescence period to ensure that all potentially |
619 | * preempted tasks have normally scheduled. Because an optprobe |
620 | * may modify multiple instructions, there is a chance that the Nth |
621 | * instruction is preempted. In that case, such tasks can return |
622 | * to the 2nd-Nth byte of the jump instruction. This wait is to avoid that. |
623 | * Note that on a non-preemptive kernel, this is transparently converted |
624 | * to synchronize_sched() to wait for all interrupts to have completed. |
625 | */ |
626 | synchronize_rcu_tasks(); |
627 | |
628 | /* Step 3: Optimize kprobes after the quiescence period */ |
629 | do_optimize_kprobes(); |
630 | |
631 | /* Step 4: Free cleaned kprobes after the quiescence period */ |
632 | do_free_cleaned_kprobes(); |
633 | |
634 | mutex_unlock(&text_mutex); |
635 | cpus_read_unlock(); |
636 | |
637 | /* Step 5: Kick optimizer again if needed */ |
638 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) |
639 | kick_kprobe_optimizer(); |
640 | |
641 | mutex_unlock(&kprobe_mutex); |
642 | } |
643 | |
644 | /* Wait for completing optimization and unoptimization */ |
645 | void wait_for_kprobe_optimizer(void) |
646 | { |
647 | mutex_lock(&kprobe_mutex); |
648 | |
649 | while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { |
650 | mutex_unlock(&kprobe_mutex); |
651 | |
652 | /* This will also make 'optimizing_work' execute immediately */ |
653 | flush_delayed_work(&optimizing_work); |
654 | /* 'optimizing_work' might not have been queued yet, relax */ |
655 | cpu_relax(); |
656 | |
657 | mutex_lock(&kprobe_mutex); |
658 | } |
659 | |
660 | mutex_unlock(&kprobe_mutex); |
661 | } |
662 | |
663 | bool optprobe_queued_unopt(struct optimized_kprobe *op) |
664 | { |
665 | struct optimized_kprobe *_op; |
666 | |
667 | list_for_each_entry(_op, &unoptimizing_list, list) { |
668 | if (op == _op) |
669 | return true; |
670 | } |
671 | |
672 | return false; |
673 | } |
674 | |
675 | /* Optimize kprobe if p is ready to be optimized */ |
676 | static void optimize_kprobe(struct kprobe *p) |
677 | { |
678 | struct optimized_kprobe *op; |
679 | |
680 | /* Check if the kprobe is disabled or not ready for optimization. */ |
681 | if (!kprobe_optready(p) || !kprobes_allow_optimization || |
682 | (kprobe_disabled(p) || kprobes_all_disarmed)) |
683 | return; |
684 | |
685 | /* kprobes with 'post_handler' can not be optimized */ |
686 | if (p->post_handler) |
687 | return; |
688 | |
689 | op = container_of(p, struct optimized_kprobe, kp); |
690 | |
691 | /* Check that there are no other kprobes within the optimized instructions. */ |
692 | if (arch_check_optimized_kprobe(op) < 0) |
693 | return; |
694 | |
695 | /* Check if it is already optimized. */ |
696 | if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { |
697 | if (optprobe_queued_unopt(op)) { |
698 | /* This is under unoptimizing. Just dequeue the probe */ |
699 | list_del_init(&op->list); |
700 | } |
701 | return; |
702 | } |
703 | op->kp.flags |= KPROBE_FLAG_OPTIMIZED; |
704 | |
705 | /* |
706 | * On the 'unoptimizing_list' and 'optimizing_list', |
707 | * 'op' must have OPTIMIZED flag |
708 | */ |
709 | if (WARN_ON_ONCE(!list_empty(&op->list))) |
710 | return; |
711 | |
712 | list_add(&op->list, &optimizing_list); |
713 | kick_kprobe_optimizer(); |
714 | } |
715 | |
716 | /* Short cut to direct unoptimizing */ |
717 | static void force_unoptimize_kprobe(struct optimized_kprobe *op) |
718 | { |
719 | lockdep_assert_cpus_held(); |
720 | arch_unoptimize_kprobe(op); |
721 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
722 | } |
723 | |
724 | /* Unoptimize a kprobe if p is optimized */ |
725 | static void unoptimize_kprobe(struct kprobe *p, bool force) |
726 | { |
727 | struct optimized_kprobe *op; |
728 | |
729 | if (!kprobe_aggrprobe(p) || kprobe_disarmed(p)) |
730 | return; /* This is not an optprobe nor optimized */ |
731 | |
732 | op = container_of(p, struct optimized_kprobe, kp); |
733 | if (!kprobe_optimized(p)) |
734 | return; |
735 | |
736 | if (!list_empty(&op->list)) { |
737 | if (optprobe_queued_unopt(op)) { |
738 | /* Queued in unoptimizing queue */ |
739 | if (force) { |
740 | /* |
741 | * Forcibly unoptimize the kprobe here, and queue it |
742 | * in the freeing list for release afterwards. |
743 | */ |
744 | force_unoptimize_kprobe(op); |
745 | list_move(&op->list, &freeing_list); |
746 | } |
747 | } else { |
748 | /* Dequeue from the optimizing queue */ |
749 | list_del_init(&op->list); |
750 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
751 | } |
752 | return; |
753 | } |
754 | |
755 | /* Optimized kprobe case */ |
756 | if (force) { |
757 | /* Forcibly update the code: this is a special case */ |
758 | force_unoptimize_kprobe(op); |
759 | } else { |
760 | list_add(&op->list, &unoptimizing_list); |
761 | kick_kprobe_optimizer(); |
762 | } |
763 | } |
764 | |
765 | /* Cancel unoptimizing for reusing */ |
766 | static int reuse_unused_kprobe(struct kprobe *ap) |
767 | { |
768 | struct optimized_kprobe *op; |
769 | |
770 | /* |
771 | * Unused kprobe MUST be on the way of delayed unoptimizing (means |
772 | * there is still a relative jump) and disabled. |
773 | */ |
774 | op = container_of(ap, struct optimized_kprobe, kp); |
775 | WARN_ON_ONCE(list_empty(&op->list)); |
776 | /* Enable the probe again */ |
777 | ap->flags &= ~KPROBE_FLAG_DISABLED; |
778 | /* Optimize it again. (remove from 'op->list') */ |
779 | if (!kprobe_optready(ap)) |
780 | return -EINVAL; |
781 | |
782 | optimize_kprobe(ap); |
783 | return 0; |
784 | } |
785 | |
786 | /* Remove optimized instructions */ |
787 | static void kill_optimized_kprobe(struct kprobe *p) |
788 | { |
789 | struct optimized_kprobe *op; |
790 | |
791 | op = container_of(p, struct optimized_kprobe, kp); |
792 | if (!list_empty(&op->list)) |
793 | /* Dequeue from the (un)optimization queue */ |
794 | list_del_init(&op->list); |
795 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
796 | |
797 | if (kprobe_unused(p)) { |
798 | /* |
799 | * Unused kprobe is on unoptimizing or freeing list. We move it |
800 | * to freeing_list and let the kprobe_optimizer() remove it from |
801 | * the kprobe hash list and free it. |
802 | */ |
803 | if (optprobe_queued_unopt(op)) |
804 | list_move(&op->list, &freeing_list); |
805 | } |
806 | |
807 | /* Don't touch the code, because it is already freed. */ |
808 | arch_remove_optimized_kprobe(op); |
809 | } |
810 | |
811 | static inline |
812 | void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) |
813 | { |
814 | if (!kprobe_ftrace(p)) |
815 | arch_prepare_optimized_kprobe(op, p); |
816 | } |
817 | |
818 | /* Try to prepare optimized instructions */ |
819 | static void prepare_optimized_kprobe(struct kprobe *p) |
820 | { |
821 | struct optimized_kprobe *op; |
822 | |
823 | op = container_of(p, struct optimized_kprobe, kp); |
824 | __prepare_optimized_kprobe(op, p); |
825 | } |
826 | |
827 | /* Allocate new optimized_kprobe and try to prepare optimized instructions. */ |
828 | static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) |
829 | { |
830 | struct optimized_kprobe *op; |
831 | |
832 | op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); |
833 | if (!op) |
834 | return NULL; |
835 | |
836 | INIT_LIST_HEAD(&op->list); |
837 | op->kp.addr = p->addr; |
838 | __prepare_optimized_kprobe(op, p); |
839 | |
840 | return &op->kp; |
841 | } |
842 | |
843 | static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); |
844 | |
845 | /* |
846 | * Prepare an optimized_kprobe and optimize it. |
847 | * NOTE: 'p' must be a normal registered kprobe. |
848 | */ |
849 | static void try_to_optimize_kprobe(struct kprobe *p) |
850 | { |
851 | struct kprobe *ap; |
852 | struct optimized_kprobe *op; |
853 | |
854 | /* Impossible to optimize ftrace-based kprobe. */ |
855 | if (kprobe_ftrace(p)) |
856 | return; |
857 | |
858 | /* For preparing optimization, jump_label_text_reserved() is called. */ |
859 | cpus_read_lock(); |
860 | jump_label_lock(); |
861 | mutex_lock(&text_mutex); |
862 | |
863 | ap = alloc_aggr_kprobe(p); |
864 | if (!ap) |
865 | goto out; |
866 | |
867 | op = container_of(ap, struct optimized_kprobe, kp); |
868 | if (!arch_prepared_optinsn(&op->optinsn)) { |
869 | /* If failed to setup optimizing, fallback to kprobe. */ |
870 | arch_remove_optimized_kprobe(op); |
871 | kfree(op); |
872 | goto out; |
873 | } |
874 | |
875 | init_aggr_kprobe(ap, p); |
876 | optimize_kprobe(ap); /* This just kicks optimizer thread. */ |
877 | |
878 | out: |
879 | mutex_unlock(&text_mutex); |
880 | jump_label_unlock(); |
881 | cpus_read_unlock(); |
882 | } |
883 | |
884 | static void optimize_all_kprobes(void) |
885 | { |
886 | struct hlist_head *head; |
887 | struct kprobe *p; |
888 | unsigned int i; |
889 | |
890 | mutex_lock(&kprobe_mutex); |
891 | /* If optimization is already allowed, just return. */ |
892 | if (kprobes_allow_optimization) |
893 | goto out; |
894 | |
895 | cpus_read_lock(); |
896 | kprobes_allow_optimization = true; |
897 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
898 | head = &kprobe_table[i]; |
899 | hlist_for_each_entry(p, head, hlist) |
900 | if (!kprobe_disabled(p)) |
901 | optimize_kprobe(p); |
902 | } |
903 | cpus_read_unlock(); |
904 | pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n" ); |
905 | out: |
906 | mutex_unlock(lock: &kprobe_mutex); |
907 | } |
908 | |
909 | #ifdef CONFIG_SYSCTL |
910 | static void unoptimize_all_kprobes(void) |
911 | { |
912 | struct hlist_head *head; |
913 | struct kprobe *p; |
914 | unsigned int i; |
915 | |
916 | mutex_lock(&kprobe_mutex); |
917 | /* If optimization is already prohibited, just return. */ |
918 | if (!kprobes_allow_optimization) { |
919 | mutex_unlock(&kprobe_mutex); |
920 | return; |
921 | } |
922 | |
923 | cpus_read_lock(); |
924 | kprobes_allow_optimization = false; |
925 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
926 | head = &kprobe_table[i]; |
927 | hlist_for_each_entry(p, head, hlist) { |
928 | if (!kprobe_disabled(p)) |
929 | unoptimize_kprobe(p, false); |
930 | } |
931 | } |
932 | cpus_read_unlock(); |
933 | mutex_unlock(&kprobe_mutex); |
934 | |
935 | /* Wait for unoptimizing completion. */ |
936 | wait_for_kprobe_optimizer(); |
937 | pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); |
938 | } |
939 | |
940 | static DEFINE_MUTEX(kprobe_sysctl_mutex); |
941 | static int sysctl_kprobes_optimization; |
942 | static int proc_kprobes_optimization_handler(struct ctl_table *table, |
943 | int write, void *buffer, |
944 | size_t *length, loff_t *ppos) |
945 | { |
946 | int ret; |
947 | |
948 | mutex_lock(&kprobe_sysctl_mutex); |
949 | sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; |
950 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); |
951 | |
952 | if (sysctl_kprobes_optimization) |
953 | optimize_all_kprobes(); |
954 | else |
955 | unoptimize_all_kprobes(); |
956 | mutex_unlock(&kprobe_sysctl_mutex); |
957 | |
958 | return ret; |
959 | } |
960 | |
961 | static struct ctl_table kprobe_sysctls[] = { |
962 | { |
963 | .procname = "kprobes-optimization" , |
964 | .data = &sysctl_kprobes_optimization, |
965 | .maxlen = sizeof(int), |
966 | .mode = 0644, |
967 | .proc_handler = proc_kprobes_optimization_handler, |
968 | .extra1 = SYSCTL_ZERO, |
969 | .extra2 = SYSCTL_ONE, |
970 | }, |
971 | {} |
972 | }; |
973 | |
974 | static void __init kprobe_sysctls_init(void) |
975 | { |
976 | register_sysctl_init("debug" , kprobe_sysctls); |
977 | } |
978 | #endif /* CONFIG_SYSCTL */ |
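/*
 * User-visible knob: with CONFIG_SYSCTL, jump optimization can be toggled at
 * runtime through the 'debug.kprobes-optimization' sysctl registered above,
 * e.g. (from a root shell):
 *
 *	sysctl -w debug.kprobes-optimization=0
 *	echo 1 > /proc/sys/debug/kprobes-optimization
 */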
979 | |
980 | /* Put a breakpoint for a probe. */ |
981 | static void __arm_kprobe(struct kprobe *p) |
982 | { |
983 | struct kprobe *_p; |
984 | |
985 | lockdep_assert_held(&text_mutex); |
986 | |
987 | /* Find the overlapping optimized kprobes. */ |
988 | _p = get_optimized_kprobe(p->addr); |
989 | if (unlikely(_p)) |
990 | /* Fallback to unoptimized kprobe */ |
991 | unoptimize_kprobe(_p, true); |
992 | |
993 | arch_arm_kprobe(p); |
994 | optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ |
995 | } |
996 | |
997 | /* Remove the breakpoint of a probe. */ |
998 | static void __disarm_kprobe(struct kprobe *p, bool reopt) |
999 | { |
1000 | struct kprobe *_p; |
1001 | |
1002 | lockdep_assert_held(&text_mutex); |
1003 | |
1004 | /* Try to unoptimize */ |
1005 | unoptimize_kprobe(p, kprobes_all_disarmed); |
1006 | |
1007 | if (!kprobe_queued(p)) { |
1008 | arch_disarm_kprobe(p); |
1009 | /* If another kprobe was blocked, re-optimize it. */ |
1010 | _p = get_optimized_kprobe(p->addr); |
1011 | if (unlikely(_p) && reopt) |
1012 | optimize_kprobe(_p); |
1013 | } |
1014 | /* |
1015 | * TODO: Since unoptimization and real disarming will be done by |
1016 | * the worker thread, we can not check here whether other probes were |
1017 | * unoptimized because of this probe. They should be re-optimized |
1018 | * by the worker thread. |
1019 | */ |
1020 | } |
1021 | |
1022 | #else /* !CONFIG_OPTPROBES */ |
1023 | |
1024 | #define optimize_kprobe(p) do {} while (0) |
1025 | #define unoptimize_kprobe(p, f) do {} while (0) |
1026 | #define kill_optimized_kprobe(p) do {} while (0) |
1027 | #define prepare_optimized_kprobe(p) do {} while (0) |
1028 | #define try_to_optimize_kprobe(p) do {} while (0) |
1029 | #define __arm_kprobe(p) arch_arm_kprobe(p) |
1030 | #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) |
1031 | #define kprobe_disarmed(p) kprobe_disabled(p) |
1032 | #define wait_for_kprobe_optimizer() do {} while (0) |
1033 | |
1034 | static int reuse_unused_kprobe(struct kprobe *ap) |
1035 | { |
1036 | /* |
1037 | * If the optimized kprobe is NOT supported, the aggr kprobe is |
1038 | * released at the same time that the last aggregated kprobe is |
1039 | * unregistered. |
1040 | * Thus there should be no chance to reuse unused kprobe. |
1041 | */ |
1042 | WARN_ON_ONCE(1); |
1043 | return -EINVAL; |
1044 | } |
1045 | |
1046 | static void free_aggr_kprobe(struct kprobe *p) |
1047 | { |
1048 | arch_remove_kprobe(p); |
1049 | kfree(p); |
1050 | } |
1051 | |
1052 | static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) |
1053 | { |
1054 | return kzalloc(sizeof(struct kprobe), GFP_KERNEL); |
1055 | } |
1056 | #endif /* CONFIG_OPTPROBES */ |
1057 | |
1058 | #ifdef CONFIG_KPROBES_ON_FTRACE |
1059 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { |
1060 | .func = kprobe_ftrace_handler, |
1061 | .flags = FTRACE_OPS_FL_SAVE_REGS, |
1062 | }; |
1063 | |
1064 | static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = { |
1065 | .func = kprobe_ftrace_handler, |
1066 | .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, |
1067 | }; |
1068 | |
1069 | static int kprobe_ipmodify_enabled; |
1070 | static int kprobe_ftrace_enabled; |
1071 | |
1072 | static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, |
1073 | int *cnt) |
1074 | { |
1075 | int ret; |
1076 | |
1077 | lockdep_assert_held(&kprobe_mutex); |
1078 | |
1079 | ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); |
1080 | if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) |
1081 | return ret; |
1082 | |
1083 | if (*cnt == 0) { |
1084 | ret = register_ftrace_function(ops); |
1085 | if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) |
1086 | goto err_ftrace; |
1087 | } |
1088 | |
1089 | (*cnt)++; |
1090 | return ret; |
1091 | |
1092 | err_ftrace: |
1093 | /* |
1094 | * At this point, since ops is not registered, we should be safe from |
1095 | * registering an empty filter. |
1096 | */ |
1097 | ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); |
1098 | return ret; |
1099 | } |
1100 | |
1101 | static int arm_kprobe_ftrace(struct kprobe *p) |
1102 | { |
1103 | bool ipmodify = (p->post_handler != NULL); |
1104 | |
1105 | return __arm_kprobe_ftrace(p, |
1106 | ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, |
1107 | ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); |
1108 | } |
1109 | |
1110 | static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, |
1111 | int *cnt) |
1112 | { |
1113 | int ret; |
1114 | |
1115 | lockdep_assert_held(&kprobe_mutex); |
1116 | |
1117 | if (*cnt == 1) { |
1118 | ret = unregister_ftrace_function(ops); |
1119 | if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n" , ret)) |
1120 | return ret; |
1121 | } |
1122 | |
1123 | (*cnt)--; |
1124 | |
1125 | ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); |
1126 | WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n", |
1127 | p->addr, ret); |
1128 | return ret; |
1129 | } |
1130 | |
1131 | static int disarm_kprobe_ftrace(struct kprobe *p) |
1132 | { |
1133 | bool ipmodify = (p->post_handler != NULL); |
1134 | |
1135 | return __disarm_kprobe_ftrace(p, |
1136 | ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, |
1137 | ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); |
1138 | } |
1139 | #else /* !CONFIG_KPROBES_ON_FTRACE */ |
1140 | static inline int arm_kprobe_ftrace(struct kprobe *p) |
1141 | { |
1142 | return -ENODEV; |
1143 | } |
1144 | |
1145 | static inline int disarm_kprobe_ftrace(struct kprobe *p) |
1146 | { |
1147 | return -ENODEV; |
1148 | } |
1149 | #endif |
1150 | |
1151 | static int prepare_kprobe(struct kprobe *p) |
1152 | { |
1153 | /* Must ensure p->addr is really on ftrace */ |
1154 | if (kprobe_ftrace(p)) |
1155 | return arch_prepare_kprobe_ftrace(p); |
1156 | |
1157 | return arch_prepare_kprobe(p); |
1158 | } |
1159 | |
1160 | static int arm_kprobe(struct kprobe *kp) |
1161 | { |
1162 | if (unlikely(kprobe_ftrace(kp))) |
1163 | return arm_kprobe_ftrace(kp); |
1164 | |
1165 | cpus_read_lock(); |
1166 | mutex_lock(&text_mutex); |
1167 | __arm_kprobe(kp); |
1168 | mutex_unlock(&text_mutex); |
1169 | cpus_read_unlock(); |
1170 | |
1171 | return 0; |
1172 | } |
1173 | |
1174 | static int disarm_kprobe(struct kprobe *kp, bool reopt) |
1175 | { |
1176 | if (unlikely(kprobe_ftrace(kp))) |
1177 | return disarm_kprobe_ftrace(kp); |
1178 | |
1179 | cpus_read_lock(); |
1180 | mutex_lock(&text_mutex); |
1181 | __disarm_kprobe(kp, reopt); |
1182 | mutex_unlock(&text_mutex); |
1183 | cpus_read_unlock(); |
1184 | |
1185 | return 0; |
1186 | } |
1187 | |
1188 | /* |
1189 | * Aggregate handlers for multiple kprobes support - these handlers |
1190 | * take care of invoking the individual kprobe handlers on p->list |
1191 | */ |
1192 | static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) |
1193 | { |
1194 | struct kprobe *kp; |
1195 | |
1196 | list_for_each_entry_rcu(kp, &p->list, list) { |
1197 | if (kp->pre_handler && likely(!kprobe_disabled(kp))) { |
1198 | set_kprobe_instance(kp); |
1199 | if (kp->pre_handler(kp, regs)) |
1200 | return 1; |
1201 | } |
1202 | reset_kprobe_instance(); |
1203 | } |
1204 | return 0; |
1205 | } |
1206 | NOKPROBE_SYMBOL(aggr_pre_handler); |
1207 | |
1208 | static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, |
1209 | unsigned long flags) |
1210 | { |
1211 | struct kprobe *kp; |
1212 | |
1213 | list_for_each_entry_rcu(kp, &p->list, list) { |
1214 | if (kp->post_handler && likely(!kprobe_disabled(kp))) { |
1215 | set_kprobe_instance(kp); |
1216 | kp->post_handler(kp, regs, flags); |
1217 | reset_kprobe_instance(); |
1218 | } |
1219 | } |
1220 | } |
1221 | NOKPROBE_SYMBOL(aggr_post_handler); |
1222 | |
1223 | /* Walks the list and increments 'nmissed' if 'p' has child probes. */ |
1224 | void kprobes_inc_nmissed_count(struct kprobe *p) |
1225 | { |
1226 | struct kprobe *kp; |
1227 | |
1228 | if (!kprobe_aggrprobe(p)) { |
1229 | p->nmissed++; |
1230 | } else { |
1231 | list_for_each_entry_rcu(kp, &p->list, list) |
1232 | kp->nmissed++; |
1233 | } |
1234 | } |
1235 | NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); |
1236 | |
1237 | static struct kprobe kprobe_busy = { |
1238 | .addr = (void *) get_kprobe, |
1239 | }; |
1240 | |
1241 | void kprobe_busy_begin(void) |
1242 | { |
1243 | struct kprobe_ctlblk *kcb; |
1244 | |
1245 | preempt_disable(); |
1246 | __this_cpu_write(current_kprobe, &kprobe_busy); |
1247 | kcb = get_kprobe_ctlblk(); |
1248 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
1249 | } |
1250 | |
1251 | void kprobe_busy_end(void) |
1252 | { |
1253 | __this_cpu_write(current_kprobe, NULL); |
1254 | preempt_enable(); |
1255 | } |
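/*
 * Usage sketch (hedged; pattern only, not tied to a specific caller): code
 * that must not be recursively probed while it touches kprobe-managed data
 * brackets the critical section with these helpers:
 *
 *	kprobe_busy_begin();
 *	... walk or update per-task kretprobe data ...
 *	kprobe_busy_end();
 *
 * While 'current_kprobe' points at 'kprobe_busy', a kprobe hit on this CPU is
 * handled as a reentrant hit rather than invoking user handlers
 * (architecture-dependent details aside).
 */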
1256 | |
1257 | /* Add the new probe to 'ap->list'. */ |
1258 | static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) |
1259 | { |
1260 | if (p->post_handler) |
1261 | unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ |
1262 | |
1263 | list_add_rcu(&p->list, &ap->list); |
1264 | if (p->post_handler && !ap->post_handler) |
1265 | ap->post_handler = aggr_post_handler; |
1266 | |
1267 | return 0; |
1268 | } |
1269 | |
1270 | /* |
1271 | * Fill in the required fields of the aggregator kprobe. Replace the |
1272 | * earlier kprobe in the hlist with the aggregator kprobe. |
1273 | */ |
1274 | static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) |
1275 | { |
1276 | /* Copy the insn slot of 'p' to 'ap'. */ |
1277 | copy_kprobe(p, ap); |
1278 | flush_insn_slot(ap); |
1279 | ap->addr = p->addr; |
1280 | ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; |
1281 | ap->pre_handler = aggr_pre_handler; |
1282 | /* We don't care about a kprobe which has gone. */ |
1283 | if (p->post_handler && !kprobe_gone(p)) |
1284 | ap->post_handler = aggr_post_handler; |
1285 | |
1286 | INIT_LIST_HEAD(&ap->list); |
1287 | INIT_HLIST_NODE(&ap->hlist); |
1288 | |
1289 | list_add_rcu(&p->list, &ap->list); |
1290 | hlist_replace_rcu(&p->hlist, &ap->hlist); |
1291 | } |
1292 | |
1293 | /* |
1294 | * This registers the second or subsequent kprobe at the same address. |
1295 | */ |
1296 | static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) |
1297 | { |
1298 | int ret = 0; |
1299 | struct kprobe *ap = orig_p; |
1300 | |
1301 | cpus_read_lock(); |
1302 | |
1303 | /* For preparing optimization, jump_label_text_reserved() is called */ |
1304 | jump_label_lock(); |
1305 | mutex_lock(&text_mutex); |
1306 | |
1307 | if (!kprobe_aggrprobe(orig_p)) { |
1308 | /* If 'orig_p' is not an 'aggr_kprobe', create new one. */ |
1309 | ap = alloc_aggr_kprobe(orig_p); |
1310 | if (!ap) { |
1311 | ret = -ENOMEM; |
1312 | goto out; |
1313 | } |
1314 | init_aggr_kprobe(ap, orig_p); |
1315 | } else if (kprobe_unused(ap)) { |
1316 | /* This probe is going to die. Rescue it */ |
1317 | ret = reuse_unused_kprobe(ap); |
1318 | if (ret) |
1319 | goto out; |
1320 | } |
1321 | |
1322 | if (kprobe_gone(ap)) { |
1323 | /* |
1324 | * We are attempting to insert a new probe at the same location |
1325 | * that had a probe in the module vaddr area which has already |
1326 | * been freed. So the instruction slot has already been |
1327 | * released. We need a new slot for the new probe. |
1328 | */ |
1329 | ret = arch_prepare_kprobe(ap); |
1330 | if (ret) |
1331 | /* |
1332 | * Even if fail to allocate new slot, don't need to |
1333 | * free the 'ap'. It will be used next time, or |
1334 | * freed by unregister_kprobe(). |
1335 | */ |
1336 | goto out; |
1337 | |
1338 | /* Prepare optimized instructions if possible. */ |
1339 | prepare_optimized_kprobe(ap); |
1340 | |
1341 | /* |
1342 | * Clear gone flag to prevent allocating new slot again, and |
1343 | * set disabled flag because it is not armed yet. |
1344 | */ |
1345 | ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) |
1346 | | KPROBE_FLAG_DISABLED; |
1347 | } |
1348 | |
1349 | /* Copy the insn slot of 'p' to 'ap'. */ |
1350 | copy_kprobe(ap, p); |
1351 | ret = add_new_kprobe(ap, p); |
1352 | |
1353 | out: |
1354 | mutex_unlock(&text_mutex); |
1355 | jump_label_unlock(); |
1356 | cpus_read_unlock(); |
1357 | |
1358 | if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { |
1359 | ap->flags &= ~KPROBE_FLAG_DISABLED; |
1360 | if (!kprobes_all_disarmed) { |
1361 | /* Arm the breakpoint again. */ |
1362 | ret = arm_kprobe(ap); |
1363 | if (ret) { |
1364 | ap->flags |= KPROBE_FLAG_DISABLED; |
1365 | list_del_rcu(&p->list); |
1366 | synchronize_rcu(); |
1367 | } |
1368 | } |
1369 | } |
1370 | return ret; |
1371 | } |
1372 | |
1373 | bool __weak arch_within_kprobe_blacklist(unsigned long addr) |
1374 | { |
1375 | /* The '__kprobes' functions and entry code must not be probed. */ |
1376 | return addr >= (unsigned long)__kprobes_text_start && |
1377 | addr < (unsigned long)__kprobes_text_end; |
1378 | } |
1379 | |
1380 | static bool __within_kprobe_blacklist(unsigned long addr) |
1381 | { |
1382 | struct kprobe_blacklist_entry *ent; |
1383 | |
1384 | if (arch_within_kprobe_blacklist(addr)) |
1385 | return true; |
1386 | /* |
1387 | * If 'kprobe_blacklist' is defined, check the address and |
1388 | * reject any probe registration in the prohibited area. |
1389 | */ |
1390 | list_for_each_entry(ent, &kprobe_blacklist, list) { |
1391 | if (addr >= ent->start_addr && addr < ent->end_addr) |
1392 | return true; |
1393 | } |
1394 | return false; |
1395 | } |
1396 | |
1397 | bool within_kprobe_blacklist(unsigned long addr) |
1398 | { |
1399 | char symname[KSYM_NAME_LEN], *p; |
1400 | |
1401 | if (__within_kprobe_blacklist(addr)) |
1402 | return true; |
1403 | |
1404 | /* Check if the address is on a suffixed-symbol */ |
1405 | if (!lookup_symbol_name(addr, symname)) { |
1406 | p = strchr(symname, '.'); |
1407 | if (!p) |
1408 | return false; |
1409 | *p = '\0'; |
1410 | addr = (unsigned long)kprobe_lookup_name(symname, 0); |
1411 | if (addr) |
1412 | return __within_kprobe_blacklist(addr); |
1413 | } |
1414 | return false; |
1415 | } |
1416 | |
1417 | /* |
1418 | * arch_adjust_kprobe_addr - adjust the address |
1419 | * @addr: symbol base address |
1420 | * @offset: offset within the symbol |
1421 | * @on_func_entry: was this @addr+@offset on the function entry |
1422 | * |
1423 | * Typically returns @addr + @offset, except for special cases where the |
1424 | * function might be prefixed by a CFI landing pad, in that case any offset |
1425 | * inside the landing pad is mapped to the first 'real' instruction of the |
1426 | * symbol. |
1427 | * |
1428 | * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C |
1429 | * instruction at +0. |
1430 | */ |
1431 | kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr, |
1432 | unsigned long offset, |
1433 | bool *on_func_entry) |
1434 | { |
1435 | *on_func_entry = !offset; |
1436 | return (kprobe_opcode_t *)(addr + offset); |
1437 | } |
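/*
 * A hypothetical arch override (sketch only, helper names invented): on an
 * IBT/BTI-style architecture whose functions start with a landing pad, an
 * implementation could map any offset inside the pad to the first real
 * instruction:
 *
 *	kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr,
 *						 unsigned long offset,
 *						 bool *on_func_entry)
 *	{
 *		if (has_landing_pad(addr) && offset <= LANDING_PAD_SIZE) {
 *			*on_func_entry = true;
 *			return (kprobe_opcode_t *)(addr + LANDING_PAD_SIZE);
 *		}
 *		*on_func_entry = !offset;
 *		return (kprobe_opcode_t *)(addr + offset);
 *	}
 */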
1438 | |
1439 | /* |
1440 | * If 'symbol_name' is specified, look it up and add the 'offset' |
1441 | * to it. This way, we can specify a relative address to a symbol. |
1442 | * This returns encoded errors if it fails to look up symbol or invalid |
1443 | * combination of parameters. |
1444 | */ |
1445 | static kprobe_opcode_t * |
1446 | _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name, |
1447 | unsigned long offset, bool *on_func_entry) |
1448 | { |
1449 | if ((symbol_name && addr) || (!symbol_name && !addr)) |
1450 | goto invalid; |
1451 | |
1452 | if (symbol_name) { |
1453 | /* |
1454 | * Input: @sym + @offset |
1455 | * Output: @addr + @offset |
1456 | * |
1457 | * NOTE: kprobe_lookup_name() does *NOT* fold the offset |
1458 | * argument into its output! |
1459 | */ |
1460 | addr = kprobe_lookup_name(symbol_name, offset); |
1461 | if (!addr) |
1462 | return ERR_PTR(-ENOENT); |
1463 | } |
1464 | |
1465 | /* |
1466 | * So here we have @addr + @offset, displace it into a new |
1467 | * @addr' + @offset' where @addr' is the symbol start address. |
1468 | */ |
1469 | addr = (void *)addr + offset; |
1470 | if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset)) |
1471 | return ERR_PTR(-ENOENT); |
1472 | addr = (void *)addr - offset; |
1473 | |
1474 | /* |
1475 | * Then ask the architecture to re-combine them, taking care of |
1476 | * magical function entry details while telling us if this was indeed |
1477 | * at the start of the function. |
1478 | */ |
1479 | addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry); |
1480 | if (addr) |
1481 | return addr; |
1482 | |
1483 | invalid: |
1484 | return ERR_PTR(-EINVAL); |
1485 | } |
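/*
 * Worked example (illustrative values): for a kprobe with
 * .symbol_name = "vfs_read" and .offset = 0x10, kprobe_lookup_name() returns
 * the symbol's start address, the kallsyms lookup above re-derives the offset
 * relative to that start, and arch_adjust_kprobe_addr() recombines them,
 * reporting *on_func_entry == false because the probe is not at +0.
 */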
1486 | |
1487 | static kprobe_opcode_t *kprobe_addr(struct kprobe *p) |
1488 | { |
1489 | bool on_func_entry; |
1490 | return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); |
1491 | } |
1492 | |
1493 | /* |
1494 | * Check the 'p' is valid and return the aggregator kprobe |
1495 | * at the same address. |
1496 | */ |
1497 | static struct kprobe *__get_valid_kprobe(struct kprobe *p) |
1498 | { |
1499 | struct kprobe *ap, *list_p; |
1500 | |
1501 | lockdep_assert_held(&kprobe_mutex); |
1502 | |
1503 | ap = get_kprobe(p->addr); |
1504 | if (unlikely(!ap)) |
1505 | return NULL; |
1506 | |
1507 | if (p != ap) { |
1508 | list_for_each_entry(list_p, &ap->list, list) |
1509 | if (list_p == p) |
1510 | /* kprobe p is a valid probe */ |
1511 | goto valid; |
1512 | return NULL; |
1513 | } |
1514 | valid: |
1515 | return ap; |
1516 | } |
1517 | |
1518 | /* |
1519 | * Warn and return error if the kprobe is being re-registered since |
1520 | * there must be a software bug. |
1521 | */ |
1522 | static inline int warn_kprobe_rereg(struct kprobe *p) |
1523 | { |
1524 | int ret = 0; |
1525 | |
1526 | mutex_lock(&kprobe_mutex); |
1527 | if (WARN_ON_ONCE(__get_valid_kprobe(p))) |
1528 | ret = -EINVAL; |
1529 | mutex_unlock(&kprobe_mutex); |
1530 | |
1531 | return ret; |
1532 | } |
1533 | |
1534 | static int check_ftrace_location(struct kprobe *p) |
1535 | { |
1536 | unsigned long addr = (unsigned long)p->addr; |
1537 | |
1538 | if (ftrace_location(addr) == addr) { |
1539 | #ifdef CONFIG_KPROBES_ON_FTRACE |
1540 | p->flags |= KPROBE_FLAG_FTRACE; |
1541 | #else /* !CONFIG_KPROBES_ON_FTRACE */ |
1542 | return -EINVAL; |
1543 | #endif |
1544 | } |
1545 | return 0; |
1546 | } |
1547 | |
1548 | static bool is_cfi_preamble_symbol(unsigned long addr) |
1549 | { |
1550 | char symbuf[KSYM_NAME_LEN]; |
1551 | |
1552 | if (lookup_symbol_name(addr, symbuf)) |
1553 | return false; |
1554 | |
1555 | return str_has_prefix(symbuf, "__cfi_") || |
1556 | str_has_prefix(symbuf, "__pfx_"); |
1557 | } |
1558 | |
1559 | static int check_kprobe_address_safe(struct kprobe *p, |
1560 | struct module **probed_mod) |
1561 | { |
1562 | int ret; |
1563 | |
1564 | ret = check_ftrace_location(p); |
1565 | if (ret) |
1566 | return ret; |
1567 | jump_label_lock(); |
1568 | preempt_disable(); |
1569 | |
1570 | /* Ensure it is not in reserved area nor out of text */ |
1571 | if (!(core_kernel_text((unsigned long) p->addr) || |
1572 | is_module_text_address((unsigned long) p->addr)) || |
1573 | in_gate_area_no_mm((unsigned long) p->addr) || |
1574 | within_kprobe_blacklist((unsigned long) p->addr) || |
1575 | jump_label_text_reserved(p->addr, p->addr) || |
1576 | static_call_text_reserved(p->addr, p->addr) || |
1577 | find_bug((unsigned long)p->addr) || |
1578 | is_cfi_preamble_symbol((unsigned long)p->addr)) { |
1579 | ret = -EINVAL; |
1580 | goto out; |
1581 | } |
1582 | |
1583 | /* Check if 'p' is probing a module. */ |
1584 | *probed_mod = __module_text_address((unsigned long) p->addr); |
1585 | if (*probed_mod) { |
1586 | /* |
1587 | * We must hold a refcount of the probed module while updating |
1588 | * its code to prohibit unexpected unloading. |
1589 | */ |
1590 | if (unlikely(!try_module_get(*probed_mod))) { |
1591 | ret = -ENOENT; |
1592 | goto out; |
1593 | } |
1594 | |
1595 | /* |
1596 | * If the module freed '.init.text', we couldn't insert |
1597 | * kprobes in there. |
1598 | */ |
1599 | if (within_module_init((unsigned long)p->addr, *probed_mod) && |
1600 | (*probed_mod)->state != MODULE_STATE_COMING) { |
1601 | module_put(*probed_mod); |
1602 | *probed_mod = NULL; |
1603 | ret = -ENOENT; |
1604 | } |
1605 | } |
1606 | out: |
1607 | preempt_enable(); |
1608 | jump_label_unlock(); |
1609 | |
1610 | return ret; |
1611 | } |
1612 | |
1613 | int register_kprobe(struct kprobe *p) |
1614 | { |
1615 | int ret; |
1616 | struct kprobe *old_p; |
1617 | struct module *probed_mod; |
1618 | kprobe_opcode_t *addr; |
1619 | bool on_func_entry; |
1620 | |
1621 | /* Adjust probe address from symbol */ |
1622 | addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); |
1623 | if (IS_ERR(addr)) |
1624 | return PTR_ERR(addr); |
1625 | p->addr = addr; |
1626 | |
1627 | ret = warn_kprobe_rereg(p); |
1628 | if (ret) |
1629 | return ret; |
1630 | |
1631 | /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ |
1632 | p->flags &= KPROBE_FLAG_DISABLED; |
1633 | p->nmissed = 0; |
1634 | INIT_LIST_HEAD(&p->list); |
1635 | |
1636 | ret = check_kprobe_address_safe(p, &probed_mod); |
1637 | if (ret) |
1638 | return ret; |
1639 | |
1640 | mutex_lock(&kprobe_mutex); |
1641 | |
1642 | if (on_func_entry) |
1643 | p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; |
1644 | |
1645 | old_p = get_kprobe(p->addr); |
1646 | if (old_p) { |
1647 | /* Since this may unoptimize 'old_p', locking 'text_mutex'. */ |
1648 | ret = register_aggr_kprobe(old_p, p); |
1649 | goto out; |
1650 | } |
1651 | |
1652 | cpus_read_lock(); |
1653 | /* Prevent text modification */ |
1654 | mutex_lock(&text_mutex); |
1655 | ret = prepare_kprobe(p); |
1656 | mutex_unlock(&text_mutex); |
1657 | cpus_read_unlock(); |
1658 | if (ret) |
1659 | goto out; |
1660 | |
1661 | INIT_HLIST_NODE(&p->hlist); |
1662 | hlist_add_head_rcu(&p->hlist, |
1663 | &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); |
1664 | |
1665 | if (!kprobes_all_disarmed && !kprobe_disabled(p)) { |
1666 | ret = arm_kprobe(p); |
1667 | if (ret) { |
1668 | hlist_del_rcu(&p->hlist); |
1669 | synchronize_rcu(); |
1670 | goto out; |
1671 | } |
1672 | } |
1673 | |
1674 | /* Try to optimize kprobe */ |
1675 | try_to_optimize_kprobe(p); |
1676 | out: |
1677 | mutex_unlock(&kprobe_mutex); |
1678 | |
1679 | if (probed_mod) |
1680 | module_put(probed_mod); |
1681 | |
1682 | return ret; |
1683 | } |
1684 | EXPORT_SYMBOL_GPL(register_kprobe); |
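/*
 * Minimal usage sketch (hedged; modelled on samples/kprobes/kprobe_example.c,
 * handler and symbol names are illustrative):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler: probe hit at %pS\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= handler_pre,
 *	};
 *
 * A module would call register_kprobe(&kp) from its init function and
 * unregister_kprobe(&kp) from its exit function.
 */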
1685 | |
1686 | /* Check if all probes on the 'ap' are disabled. */ |
1687 | static bool aggr_kprobe_disabled(struct kprobe *ap) |
1688 | { |
1689 | struct kprobe *kp; |
1690 | |
1691 | lockdep_assert_held(&kprobe_mutex); |
1692 | |
1693 | list_for_each_entry(kp, &ap->list, list) |
1694 | if (!kprobe_disabled(kp)) |
1695 | /* |
1696 | * Since there is an active probe on the list, |
1697 | * we can't disable this 'ap'. |
1698 | */ |
1699 | return false; |
1700 | |
1701 | return true; |
1702 | } |
1703 | |
1704 | static struct kprobe *__disable_kprobe(struct kprobe *p) |
1705 | { |
1706 | struct kprobe *orig_p; |
1707 | int ret; |
1708 | |
1709 | lockdep_assert_held(&kprobe_mutex); |
1710 | |
1711 | /* Get an original kprobe for return */ |
1712 | orig_p = __get_valid_kprobe(p); |
1713 | if (unlikely(orig_p == NULL)) |
1714 | return ERR_PTR(-EINVAL); |
1715 | |
1716 | if (!kprobe_disabled(p)) { |
1717 | /* Disable probe if it is a child probe */ |
1718 | if (p != orig_p) |
1719 | p->flags |= KPROBE_FLAG_DISABLED; |
1720 | |
1721 | /* Try to disarm and disable this/parent probe */ |
1722 | if (p == orig_p || aggr_kprobe_disabled(orig_p)) { |
1723 | /* |
1724 | * Don't be lazy here. Even if 'kprobes_all_disarmed' |
1725 | * is false, 'orig_p' might not have been armed yet. |
			 * Note arm_all_kprobes() __tries__ to arm all kprobes
			 * on a best-effort basis.
			 */
			if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
				ret = disarm_kprobe(orig_p, true);
				if (ret) {
					p->flags &= ~KPROBE_FLAG_DISABLED;
					return ERR_PTR(ret);
1734 | } |
1735 | } |
1736 | orig_p->flags |= KPROBE_FLAG_DISABLED; |
1737 | } |
1738 | } |
1739 | |
1740 | return orig_p; |
1741 | } |
1742 | |
1743 | /* |
1744 | * Unregister a kprobe without a scheduler synchronization. |
1745 | */ |
1746 | static int __unregister_kprobe_top(struct kprobe *p) |
1747 | { |
1748 | struct kprobe *ap, *list_p; |
1749 | |
1750 | /* Disable kprobe. This will disarm it if needed. */ |
1751 | ap = __disable_kprobe(p); |
	if (IS_ERR(ap))
		return PTR_ERR(ap);
1754 | |
1755 | if (ap == p) |
1756 | /* |
		 * This probe is an independent (and non-optimized) kprobe
1758 | * (not an aggrprobe). Remove from the hash list. |
1759 | */ |
1760 | goto disarmed; |
1761 | |
1762 | /* Following process expects this probe is an aggrprobe */ |
1763 | WARN_ON(!kprobe_aggrprobe(ap)); |
1764 | |
	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
1770 | goto disarmed; |
1771 | else { |
1772 | /* If disabling probe has special handlers, update aggrprobe */ |
1773 | if (p->post_handler && !kprobe_gone(p)) { |
1774 | list_for_each_entry(list_p, &ap->list, list) { |
1775 | if ((list_p != p) && (list_p->post_handler)) |
1776 | goto noclean; |
1777 | } |
1778 | /* |
1779 | * For the kprobe-on-ftrace case, we keep the |
1780 | * post_handler setting to identify this aggrprobe |
1781 | * armed with kprobe_ipmodify_ops. |
1782 | */ |
			if (!kprobe_ftrace(ap))
1784 | ap->post_handler = NULL; |
1785 | } |
1786 | noclean: |
1787 | /* |
1788 | * Remove from the aggrprobe: this path will do nothing in |
1789 | * __unregister_kprobe_bottom(). |
1790 | */ |
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1793 | /* |
1794 | * Try to optimize this probe again, because post |
1795 | * handler may have been changed. |
1796 | */ |
			optimize_kprobe(ap);
1798 | } |
1799 | return 0; |
1800 | |
1801 | disarmed: |
	hlist_del_rcu(&ap->hlist);
1803 | return 0; |
1804 | } |
1805 | |
1806 | static void __unregister_kprobe_bottom(struct kprobe *p) |
1807 | { |
1808 | struct kprobe *ap; |
1809 | |
	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
1818 | } |
1819 | /* Otherwise, do nothing. */ |
1820 | } |
1821 | |
1822 | int register_kprobes(struct kprobe **kps, int num) |
1823 | { |
1824 | int i, ret = 0; |
1825 | |
1826 | if (num <= 0) |
1827 | return -EINVAL; |
1828 | for (i = 0; i < num; i++) { |
1829 | ret = register_kprobe(kps[i]); |
1830 | if (ret < 0) { |
1831 | if (i > 0) |
				unregister_kprobes(kps, i);
1833 | break; |
1834 | } |
1835 | } |
1836 | return ret; |
1837 | } |
1838 | EXPORT_SYMBOL_GPL(register_kprobes); |
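
/*
 * Note: register_kprobes() is effectively all-or-nothing; if the i-th probe
 * fails to register, the probes already registered from the array are
 * unregistered again and the error code is returned.
 */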
1839 | |
1840 | void unregister_kprobe(struct kprobe *p) |
1841 | { |
	unregister_kprobes(&p, 1);
1843 | } |
1844 | EXPORT_SYMBOL_GPL(unregister_kprobe); |
1845 | |
1846 | void unregister_kprobes(struct kprobe **kps, int num) |
1847 | { |
1848 | int i; |
1849 | |
1850 | if (num <= 0) |
1851 | return; |
1852 | mutex_lock(&kprobe_mutex); |
1853 | for (i = 0; i < num; i++) |
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);
1857 | |
1858 | synchronize_rcu(); |
1859 | for (i = 0; i < num; i++) |
1860 | if (kps[i]->addr) |
			__unregister_kprobe_bottom(kps[i]);
1862 | } |
1863 | EXPORT_SYMBOL_GPL(unregister_kprobes); |
1864 | |
1865 | int __weak kprobe_exceptions_notify(struct notifier_block *self, |
1866 | unsigned long val, void *data) |
1867 | { |
1868 | return NOTIFY_DONE; |
1869 | } |
1870 | NOKPROBE_SYMBOL(kprobe_exceptions_notify); |
1871 | |
1872 | static struct notifier_block kprobe_exceptions_nb = { |
1873 | .notifier_call = kprobe_exceptions_notify, |
1874 | .priority = 0x7fffffff /* we need to be notified first */ |
1875 | }; |
1876 | |
1877 | #ifdef CONFIG_KRETPROBES |
1878 | |
1879 | #if !defined(CONFIG_KRETPROBE_ON_RETHOOK) |
1880 | |
1881 | /* callbacks for objpool of kretprobe instances */ |
1882 | static int kretprobe_init_inst(void *nod, void *context) |
1883 | { |
1884 | struct kretprobe_instance *ri = nod; |
1885 | |
1886 | ri->rph = context; |
1887 | return 0; |
1888 | } |
1889 | static int kretprobe_fini_pool(struct objpool_head *head, void *context) |
1890 | { |
1891 | kfree(context); |
1892 | return 0; |
1893 | } |
1894 | |
1895 | static void free_rp_inst_rcu(struct rcu_head *head) |
1896 | { |
1897 | struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu); |
1898 | struct kretprobe_holder *rph = ri->rph; |
1899 | |
1900 | objpool_drop(ri, &rph->pool); |
1901 | } |
1902 | NOKPROBE_SYMBOL(free_rp_inst_rcu); |
1903 | |
1904 | static void recycle_rp_inst(struct kretprobe_instance *ri) |
1905 | { |
1906 | struct kretprobe *rp = get_kretprobe(ri); |
1907 | |
1908 | if (likely(rp)) |
1909 | objpool_push(ri, &rp->rph->pool); |
1910 | else |
1911 | call_rcu(&ri->rcu, free_rp_inst_rcu); |
1912 | } |
1913 | NOKPROBE_SYMBOL(recycle_rp_inst); |
1914 | |
1915 | /* |
1916 | * This function is called from delayed_put_task_struct() when a task is |
1917 | * dead and cleaned up to recycle any kretprobe instances associated with |
1918 | * this task. These left over instances represent probed functions that |
1919 | * have been called but will never return. |
1920 | */ |
1921 | void kprobe_flush_task(struct task_struct *tk) |
1922 | { |
1923 | struct kretprobe_instance *ri; |
1924 | struct llist_node *node; |
1925 | |
1926 | /* Early boot, not yet initialized. */ |
1927 | if (unlikely(!kprobes_initialized)) |
1928 | return; |
1929 | |
1930 | kprobe_busy_begin(); |
1931 | |
1932 | node = __llist_del_all(&tk->kretprobe_instances); |
1933 | while (node) { |
1934 | ri = container_of(node, struct kretprobe_instance, llist); |
1935 | node = node->next; |
1936 | |
1937 | recycle_rp_inst(ri); |
1938 | } |
1939 | |
1940 | kprobe_busy_end(); |
1941 | } |
1942 | NOKPROBE_SYMBOL(kprobe_flush_task); |
1943 | |
1944 | static inline void free_rp_inst(struct kretprobe *rp) |
1945 | { |
1946 | struct kretprobe_holder *rph = rp->rph; |
1947 | |
1948 | if (!rph) |
1949 | return; |
1950 | rp->rph = NULL; |
1951 | objpool_fini(&rph->pool); |
1952 | } |
1953 | |
/* This assumes the 'tsk' is the current task or a task which is not running. */
1955 | static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk, |
1956 | struct llist_node **cur) |
1957 | { |
1958 | struct kretprobe_instance *ri = NULL; |
1959 | struct llist_node *node = *cur; |
1960 | |
1961 | if (!node) |
1962 | node = tsk->kretprobe_instances.first; |
1963 | else |
1964 | node = node->next; |
1965 | |
1966 | while (node) { |
1967 | ri = container_of(node, struct kretprobe_instance, llist); |
1968 | if (ri->ret_addr != kretprobe_trampoline_addr()) { |
1969 | *cur = node; |
1970 | return ri->ret_addr; |
1971 | } |
1972 | node = node->next; |
1973 | } |
1974 | return NULL; |
1975 | } |
1976 | NOKPROBE_SYMBOL(__kretprobe_find_ret_addr); |
1977 | |
1978 | /** |
1979 | * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe |
1980 | * @tsk: Target task |
1981 | * @fp: A frame pointer |
1982 | * @cur: a storage of the loop cursor llist_node pointer for next call |
1983 | * |
 * Find the correct return address modified by a kretprobe on @tsk as an
 * unsigned long value. If the return address is found, this returns that
 * address; otherwise it returns 0.
 * The @tsk must be 'current' or a task which is not running. @fp is a hint
 * to get the correct return address - which is compared with the
 * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
 * first call, but '@cur' itself must NOT be NULL.
1992 | */ |
1993 | unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, |
1994 | struct llist_node **cur) |
1995 | { |
1996 | struct kretprobe_instance *ri = NULL; |
1997 | kprobe_opcode_t *ret; |
1998 | |
1999 | if (WARN_ON_ONCE(!cur)) |
2000 | return 0; |
2001 | |
2002 | do { |
2003 | ret = __kretprobe_find_ret_addr(tsk, cur); |
2004 | if (!ret) |
2005 | break; |
2006 | ri = container_of(*cur, struct kretprobe_instance, llist); |
2007 | } while (ri->fp != fp); |
2008 | |
2009 | return (unsigned long)ret; |
2010 | } |
2011 | NOKPROBE_SYMBOL(kretprobe_find_ret_addr); |
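
/*
 * Illustrative sketch (not part of this file): a stack unwinder that meets
 * the kretprobe trampoline address in a trace would typically resolve it
 * like this, reusing the same cursor across calls so successive trampoline
 * entries map to successive saved return addresses. 'task', 'frame' and
 * 'addr' are assumed unwinder-local variables.
 *
 *	struct llist_node *cur = NULL;
 *	unsigned long real_addr;
 *
 *	if (addr == (unsigned long)kretprobe_trampoline_addr()) {
 *		real_addr = kretprobe_find_ret_addr(task, (void *)frame, &cur);
 *		if (real_addr)
 *			addr = real_addr;	// report the real return address
 *	}
 */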
2012 | |
2013 | void __weak arch_kretprobe_fixup_return(struct pt_regs *regs, |
2014 | kprobe_opcode_t *correct_ret_addr) |
2015 | { |
2016 | /* |
2017 | * Do nothing by default. Please fill this to update the fake return |
2018 | * address on the stack with the correct one on each arch if possible. |
2019 | */ |
2020 | } |
2021 | |
2022 | unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, |
2023 | void *frame_pointer) |
2024 | { |
2025 | struct kretprobe_instance *ri = NULL; |
2026 | struct llist_node *first, *node = NULL; |
2027 | kprobe_opcode_t *correct_ret_addr; |
2028 | struct kretprobe *rp; |
2029 | |
2030 | /* Find correct address and all nodes for this frame. */ |
2031 | correct_ret_addr = __kretprobe_find_ret_addr(current, &node); |
2032 | if (!correct_ret_addr) { |
		pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n");
2034 | BUG_ON(1); |
2035 | } |
2036 | |
2037 | /* |
2038 | * Set the return address as the instruction pointer, because if the |
2039 | * user handler calls stack_trace_save_regs() with this 'regs', |
2040 | * the stack trace will start from the instruction pointer. |
2041 | */ |
2042 | instruction_pointer_set(regs, (unsigned long)correct_ret_addr); |
2043 | |
2044 | /* Run the user handler of the nodes. */ |
2045 | first = current->kretprobe_instances.first; |
2046 | while (first) { |
2047 | ri = container_of(first, struct kretprobe_instance, llist); |
2048 | |
2049 | if (WARN_ON_ONCE(ri->fp != frame_pointer)) |
2050 | break; |
2051 | |
2052 | rp = get_kretprobe(ri); |
2053 | if (rp && rp->handler) { |
2054 | struct kprobe *prev = kprobe_running(); |
2055 | |
2056 | __this_cpu_write(current_kprobe, &rp->kp); |
2057 | ri->ret_addr = correct_ret_addr; |
2058 | rp->handler(ri, regs); |
2059 | __this_cpu_write(current_kprobe, prev); |
2060 | } |
2061 | if (first == node) |
2062 | break; |
2063 | |
2064 | first = first->next; |
2065 | } |
2066 | |
2067 | arch_kretprobe_fixup_return(regs, correct_ret_addr); |
2068 | |
2069 | /* Unlink all nodes for this frame. */ |
2070 | first = current->kretprobe_instances.first; |
2071 | current->kretprobe_instances.first = node->next; |
2072 | node->next = NULL; |
2073 | |
2074 | /* Recycle free instances. */ |
2075 | while (first) { |
2076 | ri = container_of(first, struct kretprobe_instance, llist); |
2077 | first = first->next; |
2078 | |
2079 | recycle_rp_inst(ri); |
2080 | } |
2081 | |
2082 | return (unsigned long)correct_ret_addr; |
2083 | } |
2084 | NOKPROBE_SYMBOL(__kretprobe_trampoline_handler) |
2085 | |
2086 | /* |
2087 | * This kprobe pre_handler is registered with every kretprobe. When probe |
2088 | * hits it will set up the return probe. |
2089 | */ |
2090 | static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) |
2091 | { |
2092 | struct kretprobe *rp = container_of(p, struct kretprobe, kp); |
2093 | struct kretprobe_holder *rph = rp->rph; |
2094 | struct kretprobe_instance *ri; |
2095 | |
2096 | ri = objpool_pop(&rph->pool); |
2097 | if (!ri) { |
2098 | rp->nmissed++; |
2099 | return 0; |
2100 | } |
2101 | |
2102 | if (rp->entry_handler && rp->entry_handler(ri, regs)) { |
2103 | objpool_push(ri, &rph->pool); |
2104 | return 0; |
2105 | } |
2106 | |
2107 | arch_prepare_kretprobe(ri, regs); |
2108 | |
2109 | __llist_add(&ri->llist, ¤t->kretprobe_instances); |
2110 | |
2111 | return 0; |
2112 | } |
2113 | NOKPROBE_SYMBOL(pre_handler_kretprobe); |
2114 | #else /* CONFIG_KRETPROBE_ON_RETHOOK */ |
2115 | /* |
2116 | * This kprobe pre_handler is registered with every kretprobe. When probe |
2117 | * hits it will set up the return probe. |
2118 | */ |
2119 | static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) |
2120 | { |
2121 | struct kretprobe *rp = container_of(p, struct kretprobe, kp); |
2122 | struct kretprobe_instance *ri; |
2123 | struct rethook_node *rhn; |
2124 | |
	rhn = rethook_try_get(rp->rh);
2126 | if (!rhn) { |
2127 | rp->nmissed++; |
2128 | return 0; |
2129 | } |
2130 | |
2131 | ri = container_of(rhn, struct kretprobe_instance, node); |
2132 | |
2133 | if (rp->entry_handler && rp->entry_handler(ri, regs)) |
		rethook_recycle(rhn);
	else
		rethook_hook(rhn, regs, kprobe_ftrace(p));
2137 | |
2138 | return 0; |
2139 | } |
2140 | NOKPROBE_SYMBOL(pre_handler_kretprobe); |
2141 | |
2142 | static void kretprobe_rethook_handler(struct rethook_node *rh, void *data, |
2143 | unsigned long ret_addr, |
2144 | struct pt_regs *regs) |
2145 | { |
2146 | struct kretprobe *rp = (struct kretprobe *)data; |
2147 | struct kretprobe_instance *ri; |
2148 | struct kprobe_ctlblk *kcb; |
2149 | |
	/* The data must NOT be null. This means the rethook data structure is broken. */
2151 | if (WARN_ON_ONCE(!data) || !rp->handler) |
2152 | return; |
2153 | |
2154 | __this_cpu_write(current_kprobe, &rp->kp); |
2155 | kcb = get_kprobe_ctlblk(); |
2156 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
2157 | |
2158 | ri = container_of(rh, struct kretprobe_instance, node); |
2159 | rp->handler(ri, regs); |
2160 | |
2161 | __this_cpu_write(current_kprobe, NULL); |
2162 | } |
2163 | NOKPROBE_SYMBOL(kretprobe_rethook_handler); |
2164 | |
2165 | #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */ |
2166 | |
2167 | /** |
2168 | * kprobe_on_func_entry() -- check whether given address is function entry |
2169 | * @addr: Target address |
2170 | * @sym: Target symbol name |
2171 | * @offset: The offset from the symbol or the address |
2172 | * |
2173 | * This checks whether the given @addr+@offset or @sym+@offset is on the |
2174 | * function entry address or not. |
 * This returns 0 if it is the function entry, or -EINVAL if it is not.
 * It also returns -ENOENT if the symbol or address lookup fails.
 * The caller must pass either @addr or @sym (the other must be NULL);
 * otherwise this returns -EINVAL.
2179 | */ |
2180 | int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) |
2181 | { |
2182 | bool on_func_entry; |
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);

	if (IS_ERR(kp_addr))
		return PTR_ERR(kp_addr);
2187 | |
2188 | if (!on_func_entry) |
2189 | return -EINVAL; |
2190 | |
2191 | return 0; |
2192 | } |
2193 | |
2194 | int register_kretprobe(struct kretprobe *rp) |
2195 | { |
2196 | int ret; |
2197 | int i; |
2198 | void *addr; |
2199 | |
	ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2201 | if (ret) |
2202 | return ret; |
2203 | |
2204 | /* If only 'rp->kp.addr' is specified, check reregistering kprobes */ |
	if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
2206 | return -EINVAL; |
2207 | |
2208 | if (kretprobe_blacklist_size) { |
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);
2212 | |
2213 | for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { |
2214 | if (kretprobe_blacklist[i].addr == addr) |
2215 | return -EINVAL; |
2216 | } |
2217 | } |
2218 | |
2219 | if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) |
2220 | return -E2BIG; |
2221 | |
2222 | rp->kp.pre_handler = pre_handler_kretprobe; |
2223 | rp->kp.post_handler = NULL; |
2224 | |
2225 | /* Pre-allocate memory for max kretprobe instances */ |
2226 | if (rp->maxactive <= 0) |
2227 | rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); |
2228 | |
2229 | #ifdef CONFIG_KRETPROBE_ON_RETHOOK |
	rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
			       sizeof(struct kretprobe_instance) +
			       rp->data_size, rp->maxactive);
	if (IS_ERR(rp->rh))
		return PTR_ERR(rp->rh);
2235 | |
2236 | rp->nmissed = 0; |
2237 | /* Establish function entry probe point */ |
2238 | ret = register_kprobe(&rp->kp); |
2239 | if (ret != 0) { |
		rethook_free(rp->rh);
2241 | rp->rh = NULL; |
2242 | } |
2243 | #else /* !CONFIG_KRETPROBE_ON_RETHOOK */ |
2244 | rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); |
2245 | if (!rp->rph) |
2246 | return -ENOMEM; |
2247 | |
2248 | if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size + |
2249 | sizeof(struct kretprobe_instance), GFP_KERNEL, |
2250 | rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) { |
2251 | kfree(rp->rph); |
2252 | rp->rph = NULL; |
2253 | return -ENOMEM; |
2254 | } |
2255 | rp->rph->rp = rp; |
2256 | rp->nmissed = 0; |
2257 | /* Establish function entry probe point */ |
2258 | ret = register_kprobe(&rp->kp); |
2259 | if (ret != 0) |
2260 | free_rp_inst(rp); |
2261 | #endif |
2262 | return ret; |
2263 | } |
2264 | EXPORT_SYMBOL_GPL(register_kretprobe); |
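
/*
 * Illustrative sketch (not part of this file): a minimal kretprobe user.
 * The probed symbol and handler names below are assumptions for the example.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%ps returned %lu\n",
 *			(void *)get_kretprobe(ri)->kp.addr,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_krp = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,	// upper bound on concurrent instances
 *	};
 *
 *	// register_kretprobe(&my_krp);  ...  unregister_kretprobe(&my_krp);
 */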
2265 | |
2266 | int register_kretprobes(struct kretprobe **rps, int num) |
2267 | { |
2268 | int ret = 0, i; |
2269 | |
2270 | if (num <= 0) |
2271 | return -EINVAL; |
2272 | for (i = 0; i < num; i++) { |
2273 | ret = register_kretprobe(rps[i]); |
2274 | if (ret < 0) { |
2275 | if (i > 0) |
				unregister_kretprobes(rps, i);
2277 | break; |
2278 | } |
2279 | } |
2280 | return ret; |
2281 | } |
2282 | EXPORT_SYMBOL_GPL(register_kretprobes); |
2283 | |
2284 | void unregister_kretprobe(struct kretprobe *rp) |
2285 | { |
	unregister_kretprobes(&rp, 1);
2287 | } |
2288 | EXPORT_SYMBOL_GPL(unregister_kretprobe); |
2289 | |
2290 | void unregister_kretprobes(struct kretprobe **rps, int num) |
2291 | { |
2292 | int i; |
2293 | |
2294 | if (num <= 0) |
2295 | return; |
2296 | mutex_lock(&kprobe_mutex); |
2297 | for (i = 0; i < num; i++) { |
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
		rethook_free(rps[i]->rh);
#else
		rps[i]->rph->rp = NULL;
#endif
	}
	mutex_unlock(&kprobe_mutex);
2307 | |
2308 | synchronize_rcu(); |
2309 | for (i = 0; i < num; i++) { |
2310 | if (rps[i]->kp.addr) { |
			__unregister_kprobe_bottom(&rps[i]->kp);
2312 | #ifndef CONFIG_KRETPROBE_ON_RETHOOK |
2313 | free_rp_inst(rps[i]); |
2314 | #endif |
2315 | } |
2316 | } |
2317 | } |
2318 | EXPORT_SYMBOL_GPL(unregister_kretprobes); |
2319 | |
2320 | #else /* CONFIG_KRETPROBES */ |
2321 | int register_kretprobe(struct kretprobe *rp) |
2322 | { |
2323 | return -EOPNOTSUPP; |
2324 | } |
2325 | EXPORT_SYMBOL_GPL(register_kretprobe); |
2326 | |
2327 | int register_kretprobes(struct kretprobe **rps, int num) |
2328 | { |
2329 | return -EOPNOTSUPP; |
2330 | } |
2331 | EXPORT_SYMBOL_GPL(register_kretprobes); |
2332 | |
2333 | void unregister_kretprobe(struct kretprobe *rp) |
2334 | { |
2335 | } |
2336 | EXPORT_SYMBOL_GPL(unregister_kretprobe); |
2337 | |
2338 | void unregister_kretprobes(struct kretprobe **rps, int num) |
2339 | { |
2340 | } |
2341 | EXPORT_SYMBOL_GPL(unregister_kretprobes); |
2342 | |
2343 | static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) |
2344 | { |
2345 | return 0; |
2346 | } |
2347 | NOKPROBE_SYMBOL(pre_handler_kretprobe); |
2348 | |
2349 | #endif /* CONFIG_KRETPROBES */ |
2350 | |
2351 | /* Set the kprobe gone and remove its instruction buffer. */ |
2352 | static void kill_kprobe(struct kprobe *p) |
2353 | { |
2354 | struct kprobe *kp; |
2355 | |
2356 | lockdep_assert_held(&kprobe_mutex); |
2357 | |
2358 | /* |
2359 | * The module is going away. We should disarm the kprobe which |
	 * is using ftrace, because the ftrace framework is still available at
	 * the 'MODULE_STATE_GOING' notification.
2362 | */ |
2363 | if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) |
2364 | disarm_kprobe_ftrace(p); |
2365 | |
2366 | p->flags |= KPROBE_FLAG_GONE; |
2367 | if (kprobe_aggrprobe(p)) { |
2368 | /* |
2369 | * If this is an aggr_kprobe, we have to list all the |
2370 | * chained probes and mark them GONE. |
2371 | */ |
2372 | list_for_each_entry(kp, &p->list, list) |
2373 | kp->flags |= KPROBE_FLAG_GONE; |
2374 | p->post_handler = NULL; |
2375 | kill_optimized_kprobe(p); |
2376 | } |
2377 | /* |
2378 | * Here, we can remove insn_slot safely, because no thread calls |
2379 | * the original probed function (which will be freed soon) any more. |
2380 | */ |
2381 | arch_remove_kprobe(p); |
2382 | } |
2383 | |
2384 | /* Disable one kprobe */ |
2385 | int disable_kprobe(struct kprobe *kp) |
2386 | { |
2387 | int ret = 0; |
2388 | struct kprobe *p; |
2389 | |
2390 | mutex_lock(&kprobe_mutex); |
2391 | |
2392 | /* Disable this kprobe */ |
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
2398 | return ret; |
2399 | } |
2400 | EXPORT_SYMBOL_GPL(disable_kprobe); |
2401 | |
2402 | /* Enable one kprobe */ |
2403 | int enable_kprobe(struct kprobe *kp) |
2404 | { |
2405 | int ret = 0; |
2406 | struct kprobe *p; |
2407 | |
2408 | mutex_lock(&kprobe_mutex); |
2409 | |
2410 | /* Check whether specified probe is valid. */ |
	p = __get_valid_kprobe(kp);
2412 | if (unlikely(p == NULL)) { |
2413 | ret = -EINVAL; |
2414 | goto out; |
2415 | } |
2416 | |
	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
2419 | ret = -EINVAL; |
2420 | goto out; |
2421 | } |
2422 | |
2423 | if (p != kp) |
2424 | kp->flags &= ~KPROBE_FLAG_DISABLED; |
2425 | |
2426 | if (!kprobes_all_disarmed && kprobe_disabled(p)) { |
2427 | p->flags &= ~KPROBE_FLAG_DISABLED; |
		ret = arm_kprobe(p);
2429 | if (ret) { |
2430 | p->flags |= KPROBE_FLAG_DISABLED; |
2431 | if (p != kp) |
2432 | kp->flags |= KPROBE_FLAG_DISABLED; |
2433 | } |
2434 | } |
2435 | out: |
	mutex_unlock(&kprobe_mutex);
2437 | return ret; |
2438 | } |
2439 | EXPORT_SYMBOL_GPL(enable_kprobe); |
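
/*
 * Illustrative sketch (not part of this file): a registered probe can be
 * toggled without unregistering it, e.g. to pause tracing temporarily.
 * 'my_kp' is assumed to be a kprobe that was registered elsewhere.
 *
 *	disable_kprobe(&my_kp);		// disarmed, but still registered
 *	...
 *	if (enable_kprobe(&my_kp))	// re-arm; fails e.g. if the probe is gone
 *		pr_warn("could not re-enable probe\n");
 */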
2440 | |
/* Caller must NOT call this in the usual path. This is only for critical cases. */
2442 | void dump_kprobe(struct kprobe *kp) |
2443 | { |
	pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
2445 | kp->symbol_name, kp->offset, kp->addr); |
2446 | } |
2447 | NOKPROBE_SYMBOL(dump_kprobe); |
2448 | |
2449 | int kprobe_add_ksym_blacklist(unsigned long entry) |
2450 | { |
2451 | struct kprobe_blacklist_entry *ent; |
2452 | unsigned long offset = 0, size = 0; |
2453 | |
	if (!kernel_text_address(entry) ||
	    !kallsyms_lookup_size_offset(entry, &size, &offset))
2456 | return -EINVAL; |
2457 | |
	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2459 | if (!ent) |
2460 | return -ENOMEM; |
2461 | ent->start_addr = entry; |
2462 | ent->end_addr = entry + size; |
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, &kprobe_blacklist);
2465 | |
2466 | return (int)size; |
2467 | } |
2468 | |
2469 | /* Add all symbols in given area into kprobe blacklist */ |
2470 | int kprobe_add_area_blacklist(unsigned long start, unsigned long end) |
2471 | { |
2472 | unsigned long entry; |
2473 | int ret = 0; |
2474 | |
2475 | for (entry = start; entry < end; entry += ret) { |
2476 | ret = kprobe_add_ksym_blacklist(entry); |
2477 | if (ret < 0) |
2478 | return ret; |
2479 | if (ret == 0) /* In case of alias symbol */ |
2480 | ret = 1; |
2481 | } |
2482 | return 0; |
2483 | } |
2484 | |
2485 | /* Remove all symbols in given area from kprobe blacklist */ |
2486 | static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end) |
2487 | { |
2488 | struct kprobe_blacklist_entry *ent, *n; |
2489 | |
2490 | list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) { |
2491 | if (ent->start_addr < start || ent->start_addr >= end) |
2492 | continue; |
		list_del(&ent->list);
		kfree(ent);
2495 | } |
2496 | } |
2497 | |
2498 | static void kprobe_remove_ksym_blacklist(unsigned long entry) |
2499 | { |
	kprobe_remove_area_blacklist(entry, entry + 1);
2501 | } |
2502 | |
2503 | int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, |
2504 | char *type, char *sym) |
2505 | { |
2506 | return -ERANGE; |
2507 | } |
2508 | |
2509 | int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, |
2510 | char *sym) |
2511 | { |
2512 | #ifdef __ARCH_WANT_KPROBES_INSN_SLOT |
	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
		return 0;
#ifdef CONFIG_OPTPROBES
	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
		return 0;
#endif
#endif
	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2521 | return 0; |
2522 | return -ERANGE; |
2523 | } |
2524 | |
2525 | int __init __weak arch_populate_kprobe_blacklist(void) |
2526 | { |
2527 | return 0; |
2528 | } |
2529 | |
2530 | /* |
2531 | * Lookup and populate the kprobe_blacklist. |
2532 | * |
2533 | * Unlike the kretprobe blacklist, we'll need to determine |
2534 | * the range of addresses that belong to the said functions, |
2535 | * since a kprobe need not necessarily be at the beginning |
2536 | * of a function. |
2537 | */ |
2538 | static int __init populate_kprobe_blacklist(unsigned long *start, |
2539 | unsigned long *end) |
2540 | { |
2541 | unsigned long entry; |
2542 | unsigned long *iter; |
2543 | int ret; |
2544 | |
2545 | for (iter = start; iter < end; iter++) { |
		entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
2547 | ret = kprobe_add_ksym_blacklist(entry); |
2548 | if (ret == -EINVAL) |
2549 | continue; |
2550 | if (ret < 0) |
2551 | return ret; |
2552 | } |
2553 | |
2554 | /* Symbols in '__kprobes_text' are blacklisted */ |
	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
					(unsigned long)__kprobes_text_end);
2557 | if (ret) |
2558 | return ret; |
2559 | |
2560 | /* Symbols in 'noinstr' section are blacklisted */ |
	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
					(unsigned long)__noinstr_text_end);
2563 | |
2564 | return ret ? : arch_populate_kprobe_blacklist(); |
2565 | } |
2566 | |
2567 | static void add_module_kprobe_blacklist(struct module *mod) |
2568 | { |
2569 | unsigned long start, end; |
2570 | int i; |
2571 | |
2572 | if (mod->kprobe_blacklist) { |
2573 | for (i = 0; i < mod->num_kprobe_blacklist; i++) |
			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2575 | } |
2576 | |
2577 | start = (unsigned long)mod->kprobes_text_start; |
2578 | if (start) { |
2579 | end = start + mod->kprobes_text_size; |
2580 | kprobe_add_area_blacklist(start, end); |
2581 | } |
2582 | |
2583 | start = (unsigned long)mod->noinstr_text_start; |
2584 | if (start) { |
2585 | end = start + mod->noinstr_text_size; |
2586 | kprobe_add_area_blacklist(start, end); |
2587 | } |
2588 | } |
2589 | |
2590 | static void remove_module_kprobe_blacklist(struct module *mod) |
2591 | { |
2592 | unsigned long start, end; |
2593 | int i; |
2594 | |
2595 | if (mod->kprobe_blacklist) { |
2596 | for (i = 0; i < mod->num_kprobe_blacklist; i++) |
			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2598 | } |
2599 | |
2600 | start = (unsigned long)mod->kprobes_text_start; |
2601 | if (start) { |
2602 | end = start + mod->kprobes_text_size; |
2603 | kprobe_remove_area_blacklist(start, end); |
2604 | } |
2605 | |
2606 | start = (unsigned long)mod->noinstr_text_start; |
2607 | if (start) { |
2608 | end = start + mod->noinstr_text_size; |
2609 | kprobe_remove_area_blacklist(start, end); |
2610 | } |
2611 | } |
2612 | |
2613 | /* Module notifier call back, checking kprobes on the module */ |
2614 | static int kprobes_module_callback(struct notifier_block *nb, |
2615 | unsigned long val, void *data) |
2616 | { |
2617 | struct module *mod = data; |
2618 | struct hlist_head *head; |
2619 | struct kprobe *p; |
2620 | unsigned int i; |
2621 | int checkcore = (val == MODULE_STATE_GOING); |
2622 | |
2623 | if (val == MODULE_STATE_COMING) { |
2624 | mutex_lock(&kprobe_mutex); |
2625 | add_module_kprobe_blacklist(mod); |
		mutex_unlock(&kprobe_mutex);
2627 | } |
2628 | if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) |
2629 | return NOTIFY_DONE; |
2630 | |
2631 | /* |
	 * When 'MODULE_STATE_GOING' is notified, both the module's '.text' and
	 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
	 * notified, only the '.init.text' section will be freed. We need to
2635 | * disable kprobes which have been inserted in the sections. |
2636 | */ |
2637 | mutex_lock(&kprobe_mutex); |
2638 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2639 | head = &kprobe_table[i]; |
2640 | hlist_for_each_entry(p, head, hlist) |
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't needed.
2648 | * |
2649 | * Note, this will also move any optimized probes |
2650 | * that are pending to be removed from their |
2651 | * corresponding lists to the 'freeing_list' and |
2652 | * will not be touched by the delayed |
2653 | * kprobe_optimizer() work handler. |
2654 | */ |
2655 | kill_kprobe(p); |
2656 | } |
2657 | } |
2658 | if (val == MODULE_STATE_GOING) |
2659 | remove_module_kprobe_blacklist(mod); |
	mutex_unlock(&kprobe_mutex);
2661 | return NOTIFY_DONE; |
2662 | } |
2663 | |
2664 | static struct notifier_block kprobe_module_nb = { |
2665 | .notifier_call = kprobes_module_callback, |
2666 | .priority = 0 |
2667 | }; |
2668 | |
2669 | void kprobe_free_init_mem(void) |
2670 | { |
2671 | void *start = (void *)(&__init_begin); |
2672 | void *end = (void *)(&__init_end); |
2673 | struct hlist_head *head; |
2674 | struct kprobe *p; |
2675 | int i; |
2676 | |
2677 | mutex_lock(&kprobe_mutex); |
2678 | |
2679 | /* Kill all kprobes on initmem because the target code has been freed. */ |
2680 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2681 | head = &kprobe_table[i]; |
2682 | hlist_for_each_entry(p, head, hlist) { |
2683 | if (start <= (void *)p->addr && (void *)p->addr < end) |
2684 | kill_kprobe(p); |
2685 | } |
2686 | } |
2687 | |
	mutex_unlock(&kprobe_mutex);
2689 | } |
2690 | |
2691 | static int __init init_kprobes(void) |
2692 | { |
2693 | int i, err; |
2694 | |
2695 | /* FIXME allocate the probe table, currently defined statically */ |
2696 | /* initialize all list heads */ |
2697 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) |
2698 | INIT_HLIST_HEAD(&kprobe_table[i]); |
2699 | |
	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err)
		pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
2704 | |
2705 | if (kretprobe_blacklist_size) { |
2706 | /* lookup the function address from its name */ |
2707 | for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { |
2708 | kretprobe_blacklist[i].addr = |
				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
			if (!kretprobe_blacklist[i].addr)
				pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
				       kretprobe_blacklist[i].name);
2713 | } |
2714 | } |
2715 | |
2716 | /* By default, kprobes are armed */ |
2717 | kprobes_all_disarmed = false; |
2718 | |
2719 | #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT) |
2720 | /* Init 'kprobe_optinsn_slots' for allocation */ |
2721 | kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; |
2722 | #endif |
2723 | |
2724 | err = arch_init_kprobes(); |
2725 | if (!err) |
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);
2729 | |
2730 | kprobes_initialized = (err == 0); |
2731 | kprobe_sysctls_init(); |
2732 | return err; |
2733 | } |
2734 | early_initcall(init_kprobes); |
2735 | |
2736 | #if defined(CONFIG_OPTPROBES) |
2737 | static int __init init_optprobes(void) |
2738 | { |
2739 | /* |
2740 | * Enable kprobe optimization - this kicks the optimizer which |
2741 | * depends on synchronize_rcu_tasks() and ksoftirqd, that is |
2742 | * not spawned in early initcall. So delay the optimization. |
2743 | */ |
2744 | optimize_all_kprobes(); |
2745 | |
2746 | return 0; |
2747 | } |
2748 | subsys_initcall(init_optprobes); |
2749 | #endif |
2750 | |
2751 | #ifdef CONFIG_DEBUG_FS |
2752 | static void report_probe(struct seq_file *pi, struct kprobe *p, |
2753 | const char *sym, int offset, char *modname, struct kprobe *pp) |
2754 | { |
2755 | char *kprobe_type; |
2756 | void *addr = p->addr; |
2757 | |
2758 | if (p->pre_handler == pre_handler_kretprobe) |
		kprobe_type = "r";
	else
		kprobe_type = "k";
2762 | |
	if (!kallsyms_show_value(pi->file->f_cred))
2764 | addr = NULL; |
2765 | |
2766 | if (sym) |
		seq_printf(pi, "%px %s %s+0x%x %s ",
			addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else	/* try to use %pS */
		seq_printf(pi, "%px %s %pS ",
			addr, kprobe_type, p->addr);
2773 | |
2774 | if (!pp) |
2775 | pp = p; |
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2781 | } |
2782 | |
2783 | static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) |
2784 | { |
2785 | return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; |
2786 | } |
2787 | |
2788 | static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) |
2789 | { |
2790 | (*pos)++; |
2791 | if (*pos >= KPROBE_TABLE_SIZE) |
2792 | return NULL; |
2793 | return pos; |
2794 | } |
2795 | |
2796 | static void kprobe_seq_stop(struct seq_file *f, void *v) |
2797 | { |
2798 | /* Nothing to do */ |
2799 | } |
2800 | |
2801 | static int show_kprobe_addr(struct seq_file *pi, void *v) |
2802 | { |
2803 | struct hlist_head *head; |
2804 | struct kprobe *p, *kp; |
2805 | const char *sym = NULL; |
2806 | unsigned int i = *(loff_t *) v; |
2807 | unsigned long offset = 0; |
2808 | char *modname, namebuf[KSYM_NAME_LEN]; |
2809 | |
2810 | head = &kprobe_table[i]; |
2811 | preempt_disable(); |
2812 | hlist_for_each_entry_rcu(p, head, hlist) { |
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
2815 | if (kprobe_aggrprobe(p)) { |
2816 | list_for_each_entry_rcu(kp, &p->list, list) |
				report_probe(pi, kp, sym, offset, modname, p);
2818 | } else |
2819 | report_probe(pi, p, sym, offset, modname, NULL); |
2820 | } |
2821 | preempt_enable(); |
2822 | return 0; |
2823 | } |
2824 | |
2825 | static const struct seq_operations kprobes_sops = { |
2826 | .start = kprobe_seq_start, |
2827 | .next = kprobe_seq_next, |
2828 | .stop = kprobe_seq_stop, |
2829 | .show = show_kprobe_addr |
2830 | }; |
2831 | |
2832 | DEFINE_SEQ_ATTRIBUTE(kprobes); |
2833 | |
2834 | /* kprobes/blacklist -- shows which functions can not be probed */ |
2835 | static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) |
2836 | { |
2837 | mutex_lock(&kprobe_mutex); |
	return seq_list_start(&kprobe_blacklist, *pos);
2839 | } |
2840 | |
2841 | static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) |
2842 | { |
	return seq_list_next(v, &kprobe_blacklist, pos);
2844 | } |
2845 | |
2846 | static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) |
2847 | { |
2848 | struct kprobe_blacklist_entry *ent = |
2849 | list_entry(v, struct kprobe_blacklist_entry, list); |
2850 | |
2851 | /* |
	 * If '/proc/kallsyms' is not showing kernel addresses, we won't
	 * show them here either.
	 */
	if (!kallsyms_show_value(m->file->f_cred))
		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
			   (void *)ent->start_addr);
	else
		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2860 | (void *)ent->end_addr, (void *)ent->start_addr); |
2861 | return 0; |
2862 | } |
2863 | |
2864 | static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v) |
2865 | { |
	mutex_unlock(&kprobe_mutex);
2867 | } |
2868 | |
2869 | static const struct seq_operations kprobe_blacklist_sops = { |
2870 | .start = kprobe_blacklist_seq_start, |
2871 | .next = kprobe_blacklist_seq_next, |
2872 | .stop = kprobe_blacklist_seq_stop, |
2873 | .show = kprobe_blacklist_seq_show, |
2874 | }; |
2875 | DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist); |
2876 | |
2877 | static int arm_all_kprobes(void) |
2878 | { |
2879 | struct hlist_head *head; |
2880 | struct kprobe *p; |
2881 | unsigned int i, total = 0, errors = 0; |
2882 | int err, ret = 0; |
2883 | |
2884 | mutex_lock(&kprobe_mutex); |
2885 | |
2886 | /* If kprobes are armed, just return */ |
2887 | if (!kprobes_all_disarmed) |
2888 | goto already_enabled; |
2889 | |
2890 | /* |
2891 | * optimize_kprobe() called by arm_kprobe() checks |
2892 | * kprobes_all_disarmed, so set kprobes_all_disarmed before |
2893 | * arm_kprobe. |
2894 | */ |
2895 | kprobes_all_disarmed = false; |
2896 | /* Arming kprobes doesn't optimize kprobe itself */ |
2897 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2898 | head = &kprobe_table[i]; |
2899 | /* Arm all kprobes on a best-effort basis */ |
2900 | hlist_for_each_entry(p, head, hlist) { |
2901 | if (!kprobe_disabled(p)) { |
				err = arm_kprobe(p);
2903 | if (err) { |
2904 | errors++; |
2905 | ret = err; |
2906 | } |
2907 | total++; |
2908 | } |
2909 | } |
2910 | } |
2911 | |
2912 | if (errors) |
		pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
			errors, total);
	else
		pr_info("Kprobes globally enabled\n");
2917 | |
2918 | already_enabled: |
	mutex_unlock(&kprobe_mutex);
2920 | return ret; |
2921 | } |
2922 | |
2923 | static int disarm_all_kprobes(void) |
2924 | { |
2925 | struct hlist_head *head; |
2926 | struct kprobe *p; |
2927 | unsigned int i, total = 0, errors = 0; |
2928 | int err, ret = 0; |
2929 | |
2930 | mutex_lock(&kprobe_mutex); |
2931 | |
2932 | /* If kprobes are already disarmed, just return */ |
2933 | if (kprobes_all_disarmed) { |
		mutex_unlock(&kprobe_mutex);
2935 | return 0; |
2936 | } |
2937 | |
2938 | kprobes_all_disarmed = true; |
2939 | |
2940 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2941 | head = &kprobe_table[i]; |
2942 | /* Disarm all kprobes on a best-effort basis */ |
2943 | hlist_for_each_entry(p, head, hlist) { |
2944 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { |
				err = disarm_kprobe(p, false);
2946 | if (err) { |
2947 | errors++; |
2948 | ret = err; |
2949 | } |
2950 | total++; |
2951 | } |
2952 | } |
2953 | } |
2954 | |
2955 | if (errors) |
		pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
			errors, total);
	else
		pr_info("Kprobes globally disabled\n");
2960 | |
	mutex_unlock(&kprobe_mutex);
2962 | |
2963 | /* Wait for disarming all kprobes by optimizer */ |
2964 | wait_for_kprobe_optimizer(); |
2965 | |
2966 | return ret; |
2967 | } |
2968 | |
2969 | /* |
2970 | * XXX: The debugfs bool file interface doesn't allow for callbacks |
2971 | * when the bool state is switched. We can reuse that facility when |
2972 | * available |
2973 | */ |
2974 | static ssize_t read_enabled_file_bool(struct file *file, |
2975 | char __user *user_buf, size_t count, loff_t *ppos) |
2976 | { |
2977 | char buf[3]; |
2978 | |
2979 | if (!kprobes_all_disarmed) |
2980 | buf[0] = '1'; |
2981 | else |
2982 | buf[0] = '0'; |
2983 | buf[1] = '\n'; |
2984 | buf[2] = 0x00; |
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2986 | } |
2987 | |
2988 | static ssize_t write_enabled_file_bool(struct file *file, |
2989 | const char __user *user_buf, size_t count, loff_t *ppos) |
2990 | { |
2991 | bool enable; |
2992 | int ret; |
2993 | |
	ret = kstrtobool_from_user(user_buf, count, &enable);
2995 | if (ret) |
2996 | return ret; |
2997 | |
2998 | ret = enable ? arm_all_kprobes() : disarm_all_kprobes(); |
2999 | if (ret) |
3000 | return ret; |
3001 | |
3002 | return count; |
3003 | } |
3004 | |
3005 | static const struct file_operations fops_kp = { |
3006 | .read = read_enabled_file_bool, |
3007 | .write = write_enabled_file_bool, |
3008 | .llseek = default_llseek, |
3009 | }; |
3010 | |
3011 | static int __init debugfs_kprobe_init(void) |
3012 | { |
3013 | struct dentry *dir; |
3014 | |
	dir = debugfs_create_dir("kprobes", NULL);

	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);

	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);

	debugfs_create_file("blacklist", 0400, dir, NULL,
			    &kprobe_blacklist_fops);
3023 | |
3024 | return 0; |
3025 | } |
3026 | |
3027 | late_initcall(debugfs_kprobe_init); |
3028 | #endif /* CONFIG_DEBUG_FS */ |
3029 | |