1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * patch.c - livepatch patching functions |
4 | * |
5 | * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> |
6 | * Copyright (C) 2014 SUSE |
7 | * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com> |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/livepatch.h> |
13 | #include <linux/list.h> |
14 | #include <linux/ftrace.h> |
15 | #include <linux/rculist.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/bug.h> |
18 | #include <linux/printk.h> |
19 | #include "core.h" |
20 | #include "patch.h" |
21 | #include "transition.h" |
22 | |
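/*
 * List of klp_ops structures, one per patched function address. Each entry
 * owns the ftrace_ops registered for that address and stacks every klp_func
 * that patches it, most recent patch first.
 */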
23 | static LIST_HEAD(klp_ops); |
24 | |
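/*
 * Find the klp_ops that already hooks the given function address, if any.
 * Looking only at the first entry of each func_stack is enough because all
 * funcs on a stack share the same old_func.
 */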
25 | struct klp_ops *klp_find_ops(void *old_func) |
26 | { |
27 | struct klp_ops *ops; |
28 | struct klp_func *func; |
29 | |
30 | list_for_each_entry(ops, &klp_ops, node) { |
31 | func = list_first_entry(&ops->func_stack, struct klp_func, |
32 | stack_node); |
33 | if (func->old_func == old_func) |
34 | return ops; |
35 | } |
36 | |
37 | return NULL; |
38 | } |
39 | |
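/*
 * The ftrace handler run on entry to every patched function. It selects
 * which klp_func should execute, taking an in-progress transition into
 * account, and redirects control by rewriting the ip in @fregs.
 */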
40 | static void notrace klp_ftrace_handler(unsigned long ip, |
41 | unsigned long parent_ip, |
42 | struct ftrace_ops *fops, |
43 | struct ftrace_regs *fregs) |
44 | { |
45 | struct klp_ops *ops; |
46 | struct klp_func *func; |
47 | int patch_state; |
48 | int bit; |
49 | |
50 | ops = container_of(fops, struct klp_ops, fops); |
51 | |
52 | /* |
53 | * The ftrace_test_recursion_trylock() will disable preemption, |
54 | * which is required for the variant of synchronize_rcu() that is |
55 | * used to allow patching functions where RCU is not watching. |
56 | * See klp_synchronize_transition() for more details. |
57 | */ |
58 | bit = ftrace_test_recursion_trylock(ip, parent_ip); |
59 | if (WARN_ON_ONCE(bit < 0)) |
60 | return; |
61 | |
62 | func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, |
63 | stack_node); |
64 | |
65 | /* |
66 | * func should never be NULL because preemption should be disabled here |
67 | * and unregister_ftrace_function() does the equivalent of a |
68 | * synchronize_rcu() before the func_stack removal. |
69 | */ |
70 | if (WARN_ON_ONCE(!func)) |
71 | goto unlock; |
72 | |
73 | /* |
74 | * In the enable path, enforce the order of the ops->func_stack and |
75 | * func->transition reads. The corresponding write barrier is in |
76 | * __klp_enable_patch(). |
77 | * |
78 | * (Note that this barrier technically isn't needed in the disable |
79 | * path. In the rare case where klp_update_patch_state() runs before |
80 | * this handler, its TIF_PATCH_PENDING read and this func->transition |
81 | * read need to be ordered. But klp_update_patch_state() already |
82 | * enforces that.) |
83 | */ |
84 | smp_rmb(); |
85 | |
86 | if (unlikely(func->transition)) { |
87 | |
88 | /* |
89 | * Enforce the order of the func->transition and |
90 | * current->patch_state reads. Otherwise we could read an |
91 | * out-of-date task state and pick the wrong function. The |
92 | * corresponding write barrier is in klp_init_transition(). |
93 | */ |
94 | smp_rmb(); |
95 | |
96 | patch_state = current->patch_state; |
97 | |
98 | WARN_ON_ONCE(patch_state == KLP_UNDEFINED); |
99 | |
100 | if (patch_state == KLP_UNPATCHED) { |
101 | /* |
102 | * Use the previously patched version of the function. |
103 | * If no previous patches exist, continue with the |
104 | * original function. |
105 | */ |
106 | func = list_entry_rcu(func->stack_node.next, |
107 | struct klp_func, stack_node); |
108 | |
109 | if (&func->stack_node == &ops->func_stack) |
110 | goto unlock; |
111 | } |
112 | } |
113 | |
114 | /* |
115 | * NOPs are used to replace existing patches with original code. |
116 | * Do nothing! Setting pc would cause an infinite loop. |
117 | */ |
118 | if (func->nop) |
119 | goto unlock; |
120 | |
121 | ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func); |
122 | |
123 | unlock: |
124 | ftrace_test_recursion_unlock(bit); |
125 | } |
126 | |
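/*
 * Remove @func from its func_stack. When it is the last remaining entry,
 * also unregister and free the ftrace_ops so that the original function
 * runs again.
 */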
127 | static void klp_unpatch_func(struct klp_func *func) |
128 | { |
129 | struct klp_ops *ops; |
130 | |
131 | if (WARN_ON(!func->patched)) |
132 | return; |
133 | if (WARN_ON(!func->old_func)) |
134 | return; |
135 | |
	ops = klp_find_ops(func->old_func);
137 | if (WARN_ON(!ops)) |
138 | return; |
139 | |
	if (list_is_singular(&ops->func_stack)) {
141 | unsigned long ftrace_loc; |
142 | |
		ftrace_loc = ftrace_location((unsigned long)func->old_func);
144 | if (WARN_ON(!ftrace_loc)) |
145 | return; |
146 | |
147 | WARN_ON(unregister_ftrace_function(&ops->fops)); |
148 | WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0)); |
149 | |
		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
153 | } else { |
		list_del_rcu(&func->stack_node);
155 | } |
156 | |
157 | func->patched = false; |
158 | } |
159 | |
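/*
 * Redirect @func->old_func to @func->new_func. The first patch of a given
 * function allocates and registers a new klp_ops; subsequent patches of the
 * same function only push onto the existing func_stack.
 */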
160 | static int klp_patch_func(struct klp_func *func) |
161 | { |
162 | struct klp_ops *ops; |
163 | int ret; |
164 | |
165 | if (WARN_ON(!func->old_func)) |
166 | return -EINVAL; |
167 | |
168 | if (WARN_ON(func->patched)) |
169 | return -EINVAL; |
170 | |
	ops = klp_find_ops(func->old_func);
172 | if (!ops) { |
173 | unsigned long ftrace_loc; |
174 | |
		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
178 | func->old_name); |
179 | return -EINVAL; |
180 | } |
181 | |
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
183 | if (!ops) |
184 | return -ENOMEM; |
185 | |
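		/*
		 * DYNAMIC: the ops is dynamically allocated and ftrace must
		 * synchronize before it can be freed; SAVE_REGS: full regs
		 * are needed to modify the ip on arches without
		 * DYNAMIC_FTRACE_WITH_ARGS; IPMODIFY: the handler may change
		 * the ip; PERMANENT: the ops survives the ftrace_enabled
		 * sysctl being cleared.
		 */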
186 | ops->fops.func = klp_ftrace_handler; |
187 | ops->fops.flags = FTRACE_OPS_FL_DYNAMIC | |
188 | #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS |
189 | FTRACE_OPS_FL_SAVE_REGS | |
190 | #endif |
191 | FTRACE_OPS_FL_IPMODIFY | |
192 | FTRACE_OPS_FL_PERMANENT; |
193 | |
		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);
198 | |
		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
202 | func->old_name, ret); |
203 | goto err; |
204 | } |
205 | |
		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
211 | goto err; |
212 | } |
213 | |
214 | |
215 | } else { |
		list_add_rcu(&func->stack_node, &ops->func_stack);
217 | } |
218 | |
219 | func->patched = true; |
220 | |
221 | return 0; |
222 | |
223 | err: |
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
227 | return ret; |
228 | } |
229 | |
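/*
 * Unpatch the patched functions of @obj. With @nops_only, only the dynamic
 * nop funcs added for atomic replace are removed.
 */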
230 | static void __klp_unpatch_object(struct klp_object *obj, bool nops_only) |
231 | { |
232 | struct klp_func *func; |
233 | |
234 | klp_for_each_func(obj, func) { |
235 | if (nops_only && !func->nop) |
236 | continue; |
237 | |
238 | if (func->patched) |
239 | klp_unpatch_func(func); |
240 | } |
241 | |
242 | if (obj->dynamic || !nops_only) |
243 | obj->patched = false; |
244 | } |
245 | |
246 | |
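/* Unpatch all patched functions of @obj. */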
247 | void klp_unpatch_object(struct klp_object *obj) |
248 | { |
	__klp_unpatch_object(obj, false);
250 | } |
251 | |
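/*
 * Patch every function of @obj. On failure the object is fully unpatched
 * again, so patching an object is all-or-nothing.
 */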
252 | int klp_patch_object(struct klp_object *obj) |
253 | { |
254 | struct klp_func *func; |
255 | int ret; |
256 | |
257 | if (WARN_ON(obj->patched)) |
258 | return -EINVAL; |
259 | |
260 | klp_for_each_func(obj, func) { |
261 | ret = klp_patch_func(func); |
262 | if (ret) { |
263 | klp_unpatch_object(obj); |
264 | return ret; |
265 | } |
266 | } |
267 | obj->patched = true; |
268 | |
269 | return 0; |
270 | } |
271 | |
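/*
 * Unpatch all patched objects of @patch, optionally restricted to the
 * dynamic nop functions.
 */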
272 | static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only) |
273 | { |
274 | struct klp_object *obj; |
275 | |
276 | klp_for_each_object(patch, obj) |
277 | if (obj->patched) |
278 | __klp_unpatch_object(obj, nops_only); |
279 | } |
280 | |
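/* Unpatch everything that @patch has patched. */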
281 | void klp_unpatch_objects(struct klp_patch *patch) |
282 | { |
	__klp_unpatch_objects(patch, false);
284 | } |
285 | |
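/*
 * Remove only the dynamically added nop functions, e.g. once an atomic
 * replace transition has completed and they are no longer needed.
 */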
286 | void klp_unpatch_objects_dynamic(struct klp_patch *patch) |
287 | { |
	__klp_unpatch_objects(patch, true);
289 | } |
290 | |