// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/static_call.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

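/*
 * Per-CPU scratch buffer for saving stack traces in klp_check_stack(). It is
 * only used with preemption disabled, see the lockdep assertion there.
 */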
#define MAX_STACK_ENTRIES 100
static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);

#define STACK_ERR_BUF_SIZE 128

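/*
 * Number of transition retries between rounds of fake signals sent to
 * straggler tasks. The retry work is scheduled roughly once per second, so
 * this corresponds to about 15 seconds.
 */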
#define SIGNALS_TIMEOUT 15

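/*
 * The patch currently being applied or reverted; NULL when no transition is
 * in progress.
 */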
struct klp_patch *klp_transition_patch;

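/*
 * The patch state all tasks are being transitioned to: KLP_PATCHED or
 * KLP_UNPATCHED, or KLP_UNDEFINED when no transition is in progress.
 */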
static int klp_target_state = KLP_UNDEFINED;

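/*
 * Number of completion retries so far; fake signals are sent to the remaining
 * tasks every SIGNALS_TIMEOUT retries, see klp_try_complete_transition().
 */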
static unsigned int klp_signals_cnt;

/*
 * When a livepatch is in progress, enable klp stack checking in
 * cond_resched(). This helps CPU-bound kthreads get patched.
 */
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

#define klp_cond_resched_enable() sched_dynamic_klp_enable()
#define klp_cond_resched_disable() sched_dynamic_klp_disable()

#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
EXPORT_SYMBOL(klp_sched_try_switch_key);

#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)

#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching, e.g. before
 * user_exit(). We cannot rely on the RCU infrastructure to do the
 * synchronization there. Instead, hard-force the sched synchronization.
 *
 * This approach allows RCU functions to be used safely for manipulating
 * func_stack.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete. Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack. Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't. If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read. The corresponding write barriers are in
	 *    klp_init_transition() and klp_reverse_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU. See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * Check for the to-be-unpatched function
		 * (the func itself).
		 */
		func_addr = (unsigned long)func->new_func;
		func_size = func->new_size;
	} else {
		/*
		 * Check for the to-be-patched function
		 * (the previous func).
		 */
		ops = klp_find_ops(func->old_func);

		if (list_is_singular(&ops->func_stack)) {
			/* original function */
			func_addr = (unsigned long)func->old_func;
			func_size = func->old_size;
		} else {
			/* previously patched function */
			struct klp_func *prev;

			prev = list_next_entry(func, stack_node);
			func_addr = (unsigned long)prev->new_func;
			func_size = prev->new_size;
		}
	}

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	unsigned long *entries = this_cpu_ptr(klp_stack_entries);
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	/* Protect 'klp_stack_entries' */
	lockdep_assert_preemption_disabled();

	ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

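/*
 * Check the task's stack and, if it is safe, switch it to the target patch
 * state. Called either directly on 'current' or via task_call_func(), which
 * guarantees the task cannot start running while its stack is inspected, so
 * this must not sleep.
 */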
static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

/*
 * Try to safely switch a task to the target patch state. If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions. If all goes well, switch the task to the target patch
	 * state.
	 */
	if (task == current)
		ret = klp_check_and_switch_task(current, &old_name);
	else
		ret = task_call_func(task, klp_check_and_switch_task, &old_name);

	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

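/*
 * Try to switch 'current' at a preemption-safe point. This is called from
 * cond_resched() (via the klp_cond_resched_enable() machinery above) while a
 * transition is in progress.
 */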
void __klp_sched_try_switch(void)
{
	if (likely(!klp_patch_pending(current)))
		return;

	/*
	 * This function is called from cond_resched() which is called in many
	 * places throughout the kernel. Using the klp_mutex here might
	 * deadlock.
	 *
	 * Instead, disable preemption to prevent racing with other callers of
	 * klp_try_switch_task(). Thanks to task_call_func() they won't be
	 * able to switch this task while it's running.
	 */
	preempt_disable();

	/*
	 * Make sure current didn't get patched between the above check and
	 * preempt_disable().
	 */
	if (unlikely(!klp_patch_pending(current)))
		goto out;

	/*
	 * Enforce the order of the TIF_PATCH_PENDING read above and the
	 * klp_target_state read in klp_try_switch_task(). The corresponding
	 * write barriers are in klp_init_transition() and
	 * klp_reverse_transition().
	 */
	smp_rmb();

	klp_try_switch_task(current);

out:
	preempt_enable();
}
EXPORT_SYMBOL(__klp_sched_try_switch);

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions. If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions. If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
				/* Make idle task go through the main loop. */
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
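		/*
		 * Pester the remaining tasks with a fake signal every
		 * SIGNALS_TIMEOUT retries; with the one-second retry interval
		 * below, that is roughly every 15 seconds.
		 */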
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over. Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* Done! Now cleanup the data structures. */
	klp_cond_resched_disable();
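	/*
	 * klp_complete_transition() clears klp_transition_patch, so remember
	 * the patch for the freeing decision below.
	 */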
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update. They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update. They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_cond_resched_enable();

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
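	/*
	 * 'state' is KLP_PATCHED (1) or KLP_UNPATCHED (0) here, so the
	 * initial per-task state is simply its logical negation.
	 */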
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to. This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
	 * __klp_sched_try_switch() don't set a task->patch_state to
	 * KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state. This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() or __klp_sched_try_switch() running in
	 * parallel with the reverse transition.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/*
	 * Make sure all existing invocations of klp_update_patch_state() and
	 * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
	 * starting the reverse transition.
	 */
	klp_synchronize_transition();

	/*
	 * All patching has stopped, now re-initialize the global variables to
	 * prepare for the reverse transition.
	 */
	klp_transition_patch->enabled = !klp_transition_patch->enabled;
	klp_target_state = !klp_target_state;

	/*
	 * Enforce the order of the klp_target_state write and the
	 * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
	 * klp_update_patch_state() and __klp_sched_try_switch() don't set
	 * task->patch_state to the wrong value.
	 */
	smp_wmb();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{

	/*
	 * The parent process may have gone through a KLP transition since
	 * the thread flag was copied in setup_thread_stack earlier. Bring
	 * the task flag up to date with the parent here.
	 *
	 * The operation is serialized against all klp_*_transition()
	 * operations by the tasklist_lock. The only exceptions are
	 * klp_update_patch_state(current) and __klp_sched_try_switch(), but we
	 * cannot race with them because we are current.
	 */
	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
	else
		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);

	child->patch_state = current->patch_state;
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}