1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * core.c - Kernel Live Patching Core |
4 | * |
5 | * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> |
6 | * Copyright (C) 2014 SUSE |
7 | */ |
8 | |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
10 | |
11 | #include <linux/module.h> |
12 | #include <linux/kernel.h> |
13 | #include <linux/mutex.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/list.h> |
16 | #include <linux/kallsyms.h> |
17 | #include <linux/livepatch.h> |
18 | #include <linux/elf.h> |
19 | #include <linux/moduleloader.h> |
20 | #include <linux/completion.h> |
21 | #include <linux/memory.h> |
22 | #include <linux/rcupdate.h> |
23 | #include <asm/cacheflush.h> |
24 | #include "core.h" |
25 | #include "patch.h" |
26 | #include "state.h" |
27 | #include "transition.h" |
28 | |
29 | /* |
30 | * klp_mutex is a coarse lock which serializes access to klp data. All |
31 | * accesses to klp-related variables and structures must have mutex protection, |
32 | * except within the following functions which carefully avoid the need for it: |
33 | * |
34 | * - klp_ftrace_handler() |
35 | * - klp_update_patch_state() |
36 | * - __klp_sched_try_switch() |
37 | */ |
38 | DEFINE_MUTEX(klp_mutex); |
39 | |
40 | /* |
41 | * Actively used patches: enabled or in transition. Note that replaced |
42 | * or disabled patches are not listed even though the related kernel |
 * module can still be loaded.
44 | */ |
45 | LIST_HEAD(klp_patches); |
46 | |
47 | static struct kobject *klp_root_kobj; |
48 | |
49 | static bool klp_is_module(struct klp_object *obj) |
50 | { |
51 | return obj->name; |
52 | } |
53 | |
54 | /* sets obj->mod if object is not vmlinux and module is found */ |
55 | static void klp_find_object_module(struct klp_object *obj) |
56 | { |
57 | struct module *mod; |
58 | |
59 | if (!klp_is_module(obj)) |
60 | return; |
61 | |
62 | rcu_read_lock_sched(); |
63 | /* |
64 | * We do not want to block removal of patched modules and therefore |
65 | * we do not take a reference here. The patches are removed by |
66 | * klp_module_going() instead. |
67 | */ |
	mod = find_module(obj->name);
69 | /* |
	 * Do not mess with klp_module_coming() and klp_module_going().
71 | * Note that the patch might still be needed before klp_module_going() |
72 | * is called. Module functions can be called even in the GOING state |
73 | * until mod->exit() finishes. This is especially important for |
	 * patches that modify the semantics of the functions.
75 | */ |
76 | if (mod && mod->klp_alive) |
77 | obj->mod = mod; |
78 | |
79 | rcu_read_unlock_sched(); |
80 | } |
81 | |
82 | static bool klp_initialized(void) |
83 | { |
84 | return !!klp_root_kobj; |
85 | } |
86 | |
87 | static struct klp_func *klp_find_func(struct klp_object *obj, |
88 | struct klp_func *old_func) |
89 | { |
90 | struct klp_func *func; |
91 | |
92 | klp_for_each_func(obj, func) { |
93 | if ((strcmp(old_func->old_name, func->old_name) == 0) && |
94 | (old_func->old_sympos == func->old_sympos)) { |
95 | return func; |
96 | } |
97 | } |
98 | |
99 | return NULL; |
100 | } |
101 | |
102 | static struct klp_object *klp_find_object(struct klp_patch *patch, |
103 | struct klp_object *old_obj) |
104 | { |
105 | struct klp_object *obj; |
106 | |
107 | klp_for_each_object(patch, obj) { |
		if (klp_is_module(old_obj)) {
109 | if (klp_is_module(obj) && |
110 | strcmp(old_obj->name, obj->name) == 0) { |
111 | return obj; |
112 | } |
113 | } else if (!klp_is_module(obj)) { |
114 | return obj; |
115 | } |
116 | } |
117 | |
118 | return NULL; |
119 | } |
120 | |
121 | struct klp_find_arg { |
122 | const char *name; |
123 | unsigned long addr; |
124 | unsigned long count; |
125 | unsigned long pos; |
126 | }; |
127 | |
128 | static int klp_match_callback(void *data, unsigned long addr) |
129 | { |
130 | struct klp_find_arg *args = data; |
131 | |
132 | args->addr = addr; |
133 | args->count++; |
134 | |
135 | /* |
136 | * Finish the search when the symbol is found for the desired position |
137 | * or the position is not defined for a non-unique symbol. |
138 | */ |
139 | if ((args->pos && (args->count == args->pos)) || |
140 | (!args->pos && (args->count > 1))) |
141 | return 1; |
142 | |
143 | return 0; |
144 | } |
145 | |
146 | static int klp_find_callback(void *data, const char *name, unsigned long addr) |
147 | { |
148 | struct klp_find_arg *args = data; |
149 | |
150 | if (strcmp(args->name, name)) |
151 | return 0; |
152 | |
153 | return klp_match_callback(data, addr); |
154 | } |
155 | |
156 | static int klp_find_object_symbol(const char *objname, const char *name, |
157 | unsigned long sympos, unsigned long *addr) |
158 | { |
159 | struct klp_find_arg args = { |
160 | .name = name, |
161 | .addr = 0, |
162 | .count = 0, |
163 | .pos = sympos, |
164 | }; |
165 | |
166 | if (objname) |
		module_kallsyms_on_each_symbol(objname, klp_find_callback, &args);
	else
		kallsyms_on_each_match_symbol(klp_match_callback, name, &args);
170 | |
171 | /* |
172 | * Ensure an address was found. If sympos is 0, ensure symbol is unique; |
173 | * otherwise ensure the symbol position count matches sympos. |
174 | */ |
175 | if (args.addr == 0) |
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
183 | } else { |
184 | *addr = args.addr; |
185 | return 0; |
186 | } |
187 | |
188 | *addr = 0; |
189 | return -EINVAL; |
190 | } |
191 | |
192 | static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, |
193 | unsigned int symndx, Elf_Shdr *relasec, |
194 | const char *sec_objname) |
195 | { |
196 | int i, cnt, ret; |
197 | char sym_objname[MODULE_NAME_LEN]; |
198 | char sym_name[KSYM_NAME_LEN]; |
199 | Elf_Rela *relas; |
200 | Elf_Sym *sym; |
201 | unsigned long sympos, addr; |
202 | bool sym_vmlinux; |
	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
204 | |
205 | /* |
206 | * Since the field widths for sym_objname and sym_name in the sscanf() |
207 | * call are hard-coded and correspond to MODULE_NAME_LEN and |
208 | * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN |
209 | * and KSYM_NAME_LEN have the values we expect them to have. |
210 | * |
211 | * Because the value of MODULE_NAME_LEN can differ among architectures, |
212 | * we use the smallest/strictest upper bound possible (56, based on |
213 | * the current definition of MODULE_NAME_LEN) to prevent overflows. |
214 | */ |
215 | BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512); |
216 | |
217 | relas = (Elf_Rela *) relasec->sh_addr; |
218 | /* For each rela in this klp relocation section */ |
219 | for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) { |
220 | sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info); |
221 | if (sym->st_shndx != SHN_LIVEPATCH) { |
			pr_err("symbol %s is not marked as a livepatch symbol\n",
223 | strtab + sym->st_name); |
224 | return -EINVAL; |
225 | } |
226 | |
227 | /* Format: .klp.sym.sym_objname.sym_name,sympos */ |
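		/*
		 * A hypothetical example of such a symbol name (the object and
		 * symbol below are purely illustrative):
		 *
		 *   .klp.sym.vmlinux.saved_command_line,0
		 *
		 * i.e. the vmlinux symbol "saved_command_line" with sympos 0,
		 * meaning the symbol is expected to be unique.
		 */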
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%511[^,],%lu",
			     sym_objname, sym_name, &sympos);
231 | if (cnt != 3) { |
			pr_err("symbol %s has an incorrectly formatted name\n",
233 | strtab + sym->st_name); |
234 | return -EINVAL; |
235 | } |
236 | |
237 | sym_vmlinux = !strcmp(sym_objname, "vmlinux" ); |
238 | |
239 | /* |
240 | * Prevent module-specific KLP rela sections from referencing |
241 | * vmlinux symbols. This helps prevent ordering issues with |
242 | * module special section initializations. Presumably such |
243 | * symbols are exported and normal relas can be used instead. |
244 | */ |
245 | if (!sec_vmlinux && sym_vmlinux) { |
			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
247 | sym_name); |
248 | return -EINVAL; |
249 | } |
250 | |
251 | /* klp_find_object_symbol() treats a NULL objname as vmlinux */ |
		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
					     sym_name, sympos, &addr);
254 | if (ret) |
255 | return ret; |
256 | |
257 | sym->st_value = addr; |
258 | } |
259 | |
260 | return 0; |
261 | } |
262 | |
263 | void __weak clear_relocate_add(Elf_Shdr *sechdrs, |
264 | const char *strtab, |
265 | unsigned int symindex, |
266 | unsigned int relsec, |
267 | struct module *me) |
268 | { |
269 | } |
270 | |
271 | /* |
272 | * At a high-level, there are two types of klp relocation sections: those which |
273 | * reference symbols which live in vmlinux; and those which reference symbols |
274 | * which live in other modules. This function is called for both types: |
275 | * |
276 | * 1) When a klp module itself loads, the module code calls this function to |
277 | * write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections). |
278 | * These relocations are written to the klp module text to allow the patched |
279 | * code/data to reference unexported vmlinux symbols. They're written as |
 * early as possible to ensure that other module init code (e.g.,
281 | * jump_label_apply_nops) can access any unexported vmlinux symbols which |
282 | * might be referenced by the klp module's special sections. |
283 | * |
284 | * 2) When a to-be-patched module loads -- or is already loaded when a |
285 | * corresponding klp module loads -- klp code calls this function to write |
286 | * module-specific klp relocations (.klp.rela.{module}.* sections). These |
287 | * are written to the klp module text to allow the patched code/data to |
288 | * reference symbols which live in the to-be-patched module or one of its |
289 | * module dependencies. Exported symbols are supported, in addition to |
290 | * unexported symbols, in order to enable late module patching, which allows |
291 | * the to-be-patched module to be loaded and patched sometime *after* the |
292 | * klp module is loaded. |
293 | */ |
294 | static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, |
295 | const char *shstrtab, const char *strtab, |
296 | unsigned int symndx, unsigned int secndx, |
297 | const char *objname, bool apply) |
298 | { |
299 | int cnt, ret; |
300 | char sec_objname[MODULE_NAME_LEN]; |
301 | Elf_Shdr *sec = sechdrs + secndx; |
302 | |
303 | /* |
304 | * Format: .klp.rela.sec_objname.section_name |
305 | * See comment in klp_resolve_symbols() for an explanation |
306 | * of the selected field width value. |
307 | */ |
	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
309 | sec_objname); |
310 | if (cnt != 1) { |
		pr_err("section %s has an incorrectly formatted name\n",
312 | shstrtab + sec->sh_name); |
313 | return -EINVAL; |
314 | } |
315 | |
	if (strcmp(objname ? objname : "vmlinux", sec_objname))
317 | return 0; |
318 | |
	if (apply) {
		ret = klp_resolve_symbols(sechdrs, strtab, symndx,
					  sec, sec_objname);
		if (ret)
			return ret;

		return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
	}

	clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
329 | return 0; |
330 | } |
331 | |
332 | int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, |
333 | const char *shstrtab, const char *strtab, |
334 | unsigned int symndx, unsigned int secndx, |
335 | const char *objname) |
336 | { |
	return klp_write_section_relocs(pmod, sechdrs, shstrtab, strtab, symndx,
					secndx, objname, true);
339 | } |
340 | |
341 | /* |
342 | * Sysfs Interface |
343 | * |
344 | * /sys/kernel/livepatch |
345 | * /sys/kernel/livepatch/<patch> |
346 | * /sys/kernel/livepatch/<patch>/enabled |
347 | * /sys/kernel/livepatch/<patch>/transition |
348 | * /sys/kernel/livepatch/<patch>/force |
349 | * /sys/kernel/livepatch/<patch>/<object> |
350 | * /sys/kernel/livepatch/<patch>/<object>/patched |
351 | * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> |
352 | */ |
353 | static int __klp_disable_patch(struct klp_patch *patch); |
354 | |
355 | static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, |
356 | const char *buf, size_t count) |
357 | { |
358 | struct klp_patch *patch; |
359 | int ret; |
360 | bool enabled; |
361 | |
	ret = kstrtobool(buf, &enabled);
363 | if (ret) |
364 | return ret; |
365 | |
366 | patch = container_of(kobj, struct klp_patch, kobj); |
367 | |
368 | mutex_lock(&klp_mutex); |
369 | |
370 | if (patch->enabled == enabled) { |
371 | /* already in requested state */ |
372 | ret = -EINVAL; |
373 | goto out; |
374 | } |
375 | |
376 | /* |
	 * Allow a pending transition to be reversed in either direction. It
	 * might be necessary so that the transition can be completed without
	 * forcing it and breaking the system integrity.
	 *
	 * Do not allow a disabled patch to be re-enabled.
382 | */ |
383 | if (patch == klp_transition_patch) |
384 | klp_reverse_transition(); |
385 | else if (!enabled) |
386 | ret = __klp_disable_patch(patch); |
387 | else |
388 | ret = -EINVAL; |
389 | |
390 | out: |
	mutex_unlock(&klp_mutex);
392 | |
393 | if (ret) |
394 | return ret; |
395 | return count; |
396 | } |
397 | |
398 | static ssize_t enabled_show(struct kobject *kobj, |
399 | struct kobj_attribute *attr, char *buf) |
400 | { |
401 | struct klp_patch *patch; |
402 | |
403 | patch = container_of(kobj, struct klp_patch, kobj); |
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
405 | } |
406 | |
407 | static ssize_t transition_show(struct kobject *kobj, |
408 | struct kobj_attribute *attr, char *buf) |
409 | { |
410 | struct klp_patch *patch; |
411 | |
412 | patch = container_of(kobj, struct klp_patch, kobj); |
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
414 | patch == klp_transition_patch); |
415 | } |
416 | |
417 | static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, |
418 | const char *buf, size_t count) |
419 | { |
420 | struct klp_patch *patch; |
421 | int ret; |
422 | bool val; |
423 | |
	ret = kstrtobool(buf, &val);
425 | if (ret) |
426 | return ret; |
427 | |
428 | if (!val) |
429 | return count; |
430 | |
431 | mutex_lock(&klp_mutex); |
432 | |
433 | patch = container_of(kobj, struct klp_patch, kobj); |
434 | if (patch != klp_transition_patch) { |
		mutex_unlock(&klp_mutex);
436 | return -EINVAL; |
437 | } |
438 | |
439 | klp_force_transition(); |
440 | |
	mutex_unlock(&klp_mutex);
442 | |
443 | return count; |
444 | } |
445 | |
446 | static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); |
447 | static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); |
448 | static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); |
449 | static struct attribute *klp_patch_attrs[] = { |
450 | &enabled_kobj_attr.attr, |
451 | &transition_kobj_attr.attr, |
452 | &force_kobj_attr.attr, |
453 | NULL |
454 | }; |
455 | ATTRIBUTE_GROUPS(klp_patch); |
456 | |
457 | static ssize_t patched_show(struct kobject *kobj, |
458 | struct kobj_attribute *attr, char *buf) |
459 | { |
460 | struct klp_object *obj; |
461 | |
462 | obj = container_of(kobj, struct klp_object, kobj); |
	return sysfs_emit(buf, "%d\n", obj->patched);
464 | } |
465 | |
466 | static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched); |
467 | static struct attribute *klp_object_attrs[] = { |
468 | &patched_kobj_attr.attr, |
469 | NULL, |
470 | }; |
471 | ATTRIBUTE_GROUPS(klp_object); |
472 | |
473 | static void klp_free_object_dynamic(struct klp_object *obj) |
474 | { |
	kfree(obj->name);
	kfree(obj);
477 | } |
478 | |
479 | static void klp_init_func_early(struct klp_object *obj, |
480 | struct klp_func *func); |
481 | static void klp_init_object_early(struct klp_patch *patch, |
482 | struct klp_object *obj); |
483 | |
484 | static struct klp_object *klp_alloc_object_dynamic(const char *name, |
485 | struct klp_patch *patch) |
486 | { |
487 | struct klp_object *obj; |
488 | |
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
497 | return NULL; |
498 | } |
499 | } |
500 | |
501 | klp_init_object_early(patch, obj); |
502 | obj->dynamic = true; |
503 | |
504 | return obj; |
505 | } |
506 | |
507 | static void klp_free_func_nop(struct klp_func *func) |
508 | { |
	kfree(func->old_name);
	kfree(func);
511 | } |
512 | |
513 | static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func, |
514 | struct klp_object *obj) |
515 | { |
516 | struct klp_func *func; |
517 | |
	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
526 | return NULL; |
527 | } |
528 | } |
529 | |
530 | klp_init_func_early(obj, func); |
531 | /* |
	 * func->new_func is the same as func->old_func. These addresses are
533 | * set when the object is loaded, see klp_init_object_loaded(). |
534 | */ |
535 | func->old_sympos = old_func->old_sympos; |
536 | func->nop = true; |
537 | |
538 | return func; |
539 | } |
540 | |
541 | static int klp_add_object_nops(struct klp_patch *patch, |
542 | struct klp_object *old_obj) |
543 | { |
544 | struct klp_object *obj; |
545 | struct klp_func *func, *old_func; |
546 | |
547 | obj = klp_find_object(patch, old_obj); |
548 | |
549 | if (!obj) { |
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
551 | if (!obj) |
552 | return -ENOMEM; |
553 | } |
554 | |
555 | klp_for_each_func(old_obj, old_func) { |
556 | func = klp_find_func(obj, old_func); |
557 | if (func) |
558 | continue; |
559 | |
560 | func = klp_alloc_func_nop(old_func, obj); |
561 | if (!func) |
562 | return -ENOMEM; |
563 | } |
564 | |
565 | return 0; |
566 | } |
567 | |
568 | /* |
569 | * Add 'nop' functions which simply return to the caller to run |
570 | * the original function. The 'nop' functions are added to a |
571 | * patch to facilitate a 'replace' mode. |
572 | */ |
573 | static int klp_add_nops(struct klp_patch *patch) |
574 | { |
575 | struct klp_patch *old_patch; |
576 | struct klp_object *old_obj; |
577 | |
578 | klp_for_each_patch(old_patch) { |
579 | klp_for_each_object(old_patch, old_obj) { |
580 | int err; |
581 | |
582 | err = klp_add_object_nops(patch, old_obj); |
583 | if (err) |
584 | return err; |
585 | } |
586 | } |
587 | |
588 | return 0; |
589 | } |
590 | |
591 | static void klp_kobj_release_patch(struct kobject *kobj) |
592 | { |
593 | struct klp_patch *patch; |
594 | |
595 | patch = container_of(kobj, struct klp_patch, kobj); |
596 | complete(&patch->finish); |
597 | } |
598 | |
599 | static const struct kobj_type klp_ktype_patch = { |
600 | .release = klp_kobj_release_patch, |
601 | .sysfs_ops = &kobj_sysfs_ops, |
602 | .default_groups = klp_patch_groups, |
603 | }; |
604 | |
605 | static void klp_kobj_release_object(struct kobject *kobj) |
606 | { |
607 | struct klp_object *obj; |
608 | |
609 | obj = container_of(kobj, struct klp_object, kobj); |
610 | |
611 | if (obj->dynamic) |
612 | klp_free_object_dynamic(obj); |
613 | } |
614 | |
615 | static const struct kobj_type klp_ktype_object = { |
616 | .release = klp_kobj_release_object, |
617 | .sysfs_ops = &kobj_sysfs_ops, |
618 | .default_groups = klp_object_groups, |
619 | }; |
620 | |
621 | static void klp_kobj_release_func(struct kobject *kobj) |
622 | { |
623 | struct klp_func *func; |
624 | |
625 | func = container_of(kobj, struct klp_func, kobj); |
626 | |
627 | if (func->nop) |
628 | klp_free_func_nop(func); |
629 | } |
630 | |
631 | static const struct kobj_type klp_ktype_func = { |
632 | .release = klp_kobj_release_func, |
633 | .sysfs_ops = &kobj_sysfs_ops, |
634 | }; |
635 | |
636 | static void __klp_free_funcs(struct klp_object *obj, bool nops_only) |
637 | { |
638 | struct klp_func *func, *tmp_func; |
639 | |
640 | klp_for_each_func_safe(obj, func, tmp_func) { |
641 | if (nops_only && !func->nop) |
642 | continue; |
643 | |
		list_del(&func->node);
		kobject_put(&func->kobj);
646 | } |
647 | } |
648 | |
649 | /* Clean up when a patched object is unloaded */ |
650 | static void klp_free_object_loaded(struct klp_object *obj) |
651 | { |
652 | struct klp_func *func; |
653 | |
654 | obj->mod = NULL; |
655 | |
656 | klp_for_each_func(obj, func) { |
657 | func->old_func = NULL; |
658 | |
659 | if (func->nop) |
660 | func->new_func = NULL; |
661 | } |
662 | } |
663 | |
664 | static void __klp_free_objects(struct klp_patch *patch, bool nops_only) |
665 | { |
666 | struct klp_object *obj, *tmp_obj; |
667 | |
668 | klp_for_each_object_safe(patch, obj, tmp_obj) { |
669 | __klp_free_funcs(obj, nops_only); |
670 | |
671 | if (nops_only && !obj->dynamic) |
672 | continue; |
673 | |
		list_del(&obj->node);
		kobject_put(&obj->kobj);
676 | } |
677 | } |
678 | |
679 | static void klp_free_objects(struct klp_patch *patch) |
680 | { |
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
687 | } |
688 | |
689 | /* |
690 | * This function implements the free operations that can be called safely |
691 | * under klp_mutex. |
692 | * |
693 | * The operation must be completed by calling klp_free_patch_finish() |
694 | * outside klp_mutex. |
695 | */ |
696 | static void klp_free_patch_start(struct klp_patch *patch) |
697 | { |
	if (!list_empty(&patch->list))
		list_del(&patch->list);
700 | |
701 | klp_free_objects(patch); |
702 | } |
703 | |
704 | /* |
705 | * This function implements the free part that must be called outside |
706 | * klp_mutex. |
707 | * |
708 | * It must be called after klp_free_patch_start(). And it has to be |
709 | * the last function accessing the livepatch structures when the patch |
710 | * gets disabled. |
711 | */ |
712 | static void klp_free_patch_finish(struct klp_patch *patch) |
713 | { |
714 | /* |
715 | * Avoid deadlock with enabled_store() sysfs callback by |
716 | * calling this outside klp_mutex. It is safe because |
717 | * this is called when the patch gets disabled and it |
718 | * cannot get enabled again. |
719 | */ |
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
726 | } |
727 | |
728 | /* |
 * The livepatch might be freed from the sysfs interface created by the patch.
 * This work allows waiting for the interface to be destroyed in a separate
 * context.
732 | */ |
733 | static void klp_free_patch_work_fn(struct work_struct *work) |
734 | { |
735 | struct klp_patch *patch = |
736 | container_of(work, struct klp_patch, free_work); |
737 | |
738 | klp_free_patch_finish(patch); |
739 | } |
740 | |
741 | void klp_free_patch_async(struct klp_patch *patch) |
742 | { |
743 | klp_free_patch_start(patch); |
	schedule_work(&patch->free_work);
745 | } |
746 | |
747 | void klp_free_replaced_patches_async(struct klp_patch *new_patch) |
748 | { |
749 | struct klp_patch *old_patch, *tmp_patch; |
750 | |
751 | klp_for_each_patch_safe(old_patch, tmp_patch) { |
752 | if (old_patch == new_patch) |
753 | return; |
		klp_free_patch_async(old_patch);
755 | } |
756 | } |
757 | |
758 | static int klp_init_func(struct klp_object *obj, struct klp_func *func) |
759 | { |
760 | if (!func->old_name) |
761 | return -EINVAL; |
762 | |
763 | /* |
764 | * NOPs get the address later. The patched module must be loaded, |
765 | * see klp_init_object_loaded(). |
766 | */ |
767 | if (!func->new_func && !func->nop) |
768 | return -EINVAL; |
769 | |
770 | if (strlen(func->old_name) >= KSYM_NAME_LEN) |
771 | return -EINVAL; |
772 | |
	INIT_LIST_HEAD(&func->stack_node);
774 | func->patched = false; |
775 | func->transition = false; |
776 | |
	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
785 | } |
786 | |
787 | static int klp_write_object_relocs(struct klp_patch *patch, |
788 | struct klp_object *obj, |
789 | bool apply) |
790 | { |
791 | int i, ret; |
792 | struct klp_modinfo *info = patch->mod->klp_info; |
793 | |
794 | for (i = 1; i < info->hdr.e_shnum; i++) { |
795 | Elf_Shdr *sec = info->sechdrs + i; |
796 | |
797 | if (!(sec->sh_flags & SHF_RELA_LIVEPATCH)) |
798 | continue; |
799 | |
		ret = klp_write_section_relocs(patch->mod, info->sechdrs,
					       info->secstrings,
					       patch->mod->core_kallsyms.strtab,
					       info->symndx, i, obj->name, apply);
804 | if (ret) |
805 | return ret; |
806 | } |
807 | |
808 | return 0; |
809 | } |
810 | |
811 | static int klp_apply_object_relocs(struct klp_patch *patch, |
812 | struct klp_object *obj) |
813 | { |
	return klp_write_object_relocs(patch, obj, true);
815 | } |
816 | |
817 | static void klp_clear_object_relocs(struct klp_patch *patch, |
818 | struct klp_object *obj) |
819 | { |
	klp_write_object_relocs(patch, obj, false);
821 | } |
822 | |
/* parts of the initialization that are done only when the object is loaded */
824 | static int klp_init_object_loaded(struct klp_patch *patch, |
825 | struct klp_object *obj) |
826 | { |
827 | struct klp_func *func; |
828 | int ret; |
829 | |
830 | if (klp_is_module(obj)) { |
831 | /* |
832 | * Only write module-specific relocations here |
833 | * (.klp.rela.{module}.*). vmlinux-specific relocations were |
834 | * written earlier during the initialization of the klp module |
835 | * itself. |
836 | */ |
837 | ret = klp_apply_object_relocs(patch, obj); |
838 | if (ret) |
839 | return ret; |
840 | } |
841 | |
842 | klp_for_each_func(obj, func) { |
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
846 | if (ret) |
847 | return ret; |
848 | |
		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
853 | func->old_name); |
854 | return -ENOENT; |
855 | } |
856 | |
857 | if (func->nop) |
858 | func->new_func = func->old_func; |
859 | |
		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
864 | func->old_name); |
865 | return -ENOENT; |
866 | } |
867 | } |
868 | |
869 | return 0; |
870 | } |
871 | |
872 | static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) |
873 | { |
874 | struct klp_func *func; |
875 | int ret; |
876 | const char *name; |
877 | |
878 | if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN) |
879 | return -EINVAL; |
880 | |
881 | obj->patched = false; |
882 | obj->mod = NULL; |
883 | |
884 | klp_find_object_module(obj); |
885 | |
	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
888 | if (ret) |
889 | return ret; |
890 | |
891 | klp_for_each_func(obj, func) { |
892 | ret = klp_init_func(obj, func); |
893 | if (ret) |
894 | return ret; |
895 | } |
896 | |
897 | if (klp_is_object_loaded(obj)) |
898 | ret = klp_init_object_loaded(patch, obj); |
899 | |
900 | return ret; |
901 | } |
902 | |
903 | static void klp_init_func_early(struct klp_object *obj, |
904 | struct klp_func *func) |
905 | { |
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
908 | } |
909 | |
910 | static void klp_init_object_early(struct klp_patch *patch, |
911 | struct klp_object *obj) |
912 | { |
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
916 | } |
917 | |
918 | static void klp_init_patch_early(struct klp_patch *patch) |
919 | { |
920 | struct klp_object *obj; |
921 | struct klp_func *func; |
922 | |
	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
926 | patch->enabled = false; |
927 | patch->forced = false; |
928 | INIT_WORK(&patch->free_work, klp_free_patch_work_fn); |
	init_completion(&patch->finish);
930 | |
931 | klp_for_each_object_static(patch, obj) { |
932 | klp_init_object_early(patch, obj); |
933 | |
934 | klp_for_each_func_static(obj, func) { |
935 | klp_init_func_early(obj, func); |
936 | } |
937 | } |
938 | } |
939 | |
940 | static int klp_init_patch(struct klp_patch *patch) |
941 | { |
942 | struct klp_object *obj; |
943 | int ret; |
944 | |
	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
946 | if (ret) |
947 | return ret; |
948 | |
949 | if (patch->replace) { |
950 | ret = klp_add_nops(patch); |
951 | if (ret) |
952 | return ret; |
953 | } |
954 | |
955 | klp_for_each_object(patch, obj) { |
956 | ret = klp_init_object(patch, obj); |
957 | if (ret) |
958 | return ret; |
959 | } |
960 | |
	list_add_tail(&patch->list, &klp_patches);
962 | |
963 | return 0; |
964 | } |
965 | |
966 | static int __klp_disable_patch(struct klp_patch *patch) |
967 | { |
968 | struct klp_object *obj; |
969 | |
970 | if (WARN_ON(!patch->enabled)) |
971 | return -EINVAL; |
972 | |
973 | if (klp_transition_patch) |
974 | return -EBUSY; |
975 | |
976 | klp_init_transition(patch, KLP_UNPATCHED); |
977 | |
978 | klp_for_each_object(patch, obj) |
979 | if (obj->patched) |
980 | klp_pre_unpatch_callback(obj); |
981 | |
982 | /* |
983 | * Enforce the order of the func->transition writes in |
984 | * klp_init_transition() and the TIF_PATCH_PENDING writes in |
985 | * klp_start_transition(). In the rare case where klp_ftrace_handler() |
986 | * is called shortly after klp_update_patch_state() switches the task, |
987 | * this ensures the handler sees that func->transition is set. |
988 | */ |
989 | smp_wmb(); |
990 | |
991 | klp_start_transition(); |
992 | patch->enabled = false; |
993 | klp_try_complete_transition(); |
994 | |
995 | return 0; |
996 | } |
997 | |
998 | static int __klp_enable_patch(struct klp_patch *patch) |
999 | { |
1000 | struct klp_object *obj; |
1001 | int ret; |
1002 | |
1003 | if (klp_transition_patch) |
1004 | return -EBUSY; |
1005 | |
1006 | if (WARN_ON(patch->enabled)) |
1007 | return -EINVAL; |
1008 | |
	pr_notice("enabling patch '%s'\n", patch->mod->name);
1010 | |
1011 | klp_init_transition(patch, KLP_PATCHED); |
1012 | |
1013 | /* |
1014 | * Enforce the order of the func->transition writes in |
1015 | * klp_init_transition() and the ops->func_stack writes in |
1016 | * klp_patch_object(), so that klp_ftrace_handler() will see the |
1017 | * func->transition updates before the handler is registered and the |
1018 | * new funcs become visible to the handler. |
1019 | */ |
1020 | smp_wmb(); |
1021 | |
1022 | klp_for_each_object(patch, obj) { |
1023 | if (!klp_is_object_loaded(obj)) |
1024 | continue; |
1025 | |
1026 | ret = klp_pre_patch_callback(obj); |
1027 | if (ret) { |
			pr_warn("pre-patch callback failed for object '%s'\n",
1029 | klp_is_module(obj) ? obj->name : "vmlinux" ); |
1030 | goto err; |
1031 | } |
1032 | |
1033 | ret = klp_patch_object(obj); |
1034 | if (ret) { |
			pr_warn("failed to patch object '%s'\n",
1036 | klp_is_module(obj) ? obj->name : "vmlinux" ); |
1037 | goto err; |
1038 | } |
1039 | } |
1040 | |
1041 | klp_start_transition(); |
1042 | patch->enabled = true; |
1043 | klp_try_complete_transition(); |
1044 | |
1045 | return 0; |
1046 | err: |
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
1048 | |
1049 | klp_cancel_transition(); |
1050 | return ret; |
1051 | } |
1052 | |
1053 | /** |
1054 | * klp_enable_patch() - enable the livepatch |
1055 | * @patch: patch to be enabled |
1056 | * |
1057 | * Initializes the data structure associated with the patch, creates the sysfs |
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
1060 | * |
1061 | * This function is supposed to be called from the livepatch module_init() |
1062 | * callback. |
1063 | * |
1064 | * Return: 0 on success, otherwise error |
1065 | */ |
1066 | int klp_enable_patch(struct klp_patch *patch) |
1067 | { |
1068 | int ret; |
1069 | struct klp_object *obj; |
1070 | |
1071 | if (!patch || !patch->mod || !patch->objs) |
1072 | return -EINVAL; |
1073 | |
1074 | klp_for_each_object_static(patch, obj) { |
1075 | if (!obj->funcs) |
1076 | return -EINVAL; |
1077 | } |
1078 | |
1079 | |
	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
1082 | patch->mod->name); |
1083 | return -EINVAL; |
1084 | } |
1085 | |
1086 | if (!klp_initialized()) |
1087 | return -ENODEV; |
1088 | |
1089 | if (!klp_have_reliable_stack()) { |
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
1092 | } |
1093 | |
1094 | mutex_lock(&klp_mutex); |
1095 | |
	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
		       patch->mod->name);
		mutex_unlock(&klp_mutex);
1100 | return -EINVAL; |
1101 | } |
1102 | |
	if (!try_module_get(patch->mod)) {
		mutex_unlock(&klp_mutex);
1105 | return -ENODEV; |
1106 | } |
1107 | |
1108 | klp_init_patch_early(patch); |
1109 | |
1110 | ret = klp_init_patch(patch); |
1111 | if (ret) |
1112 | goto err; |
1113 | |
1114 | ret = __klp_enable_patch(patch); |
1115 | if (ret) |
1116 | goto err; |
1117 | |
	mutex_unlock(&klp_mutex);
1119 | |
1120 | return 0; |
1121 | |
1122 | err: |
1123 | klp_free_patch_start(patch); |
1124 | |
	mutex_unlock(&klp_mutex);
1126 | |
1127 | klp_free_patch_finish(patch); |
1128 | |
1129 | return ret; |
1130 | } |
1131 | EXPORT_SYMBOL_GPL(klp_enable_patch); |
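
/*
 * A minimal caller sketch (the symbol and function names below are purely
 * illustrative; see samples/livepatch/ for a complete, buildable example).
 * The livepatch module also needs MODULE_INFO(livepatch, "Y") so that it
 * passes the is_livepatch_module() check above:
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,		(leaving .name NULL targets vmlinux)
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 */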
1132 | |
1133 | /* |
1134 | * This function unpatches objects from the replaced livepatches. |
1135 | * |
1136 | * We could be pretty aggressive here. It is called in the situation where |
1137 | * these structures are no longer accessed from the ftrace handler. |
 * All functions are redirected by the klp_transition_patch. They either
 * use the new code or stay in the original code thanks to the special
 * nop function patches.
1141 | * |
1142 | * The only exception is when the transition was forced. In this case, |
1143 | * klp_ftrace_handler() might still see the replaced patch on the stack. |
1144 | * Fortunately, it is carefully designed to work with removed functions |
1145 | * thanks to RCU. We only have to keep the patches on the system. Also |
1146 | * this is handled transparently by patch->module_put. |
1147 | */ |
1148 | void klp_unpatch_replaced_patches(struct klp_patch *new_patch) |
1149 | { |
1150 | struct klp_patch *old_patch; |
1151 | |
1152 | klp_for_each_patch(old_patch) { |
1153 | if (old_patch == new_patch) |
1154 | return; |
1155 | |
1156 | old_patch->enabled = false; |
		klp_unpatch_objects(old_patch);
1158 | } |
1159 | } |
1160 | |
1161 | /* |
1162 | * This function removes the dynamically allocated 'nop' functions. |
1163 | * |
1164 | * We could be pretty aggressive. NOPs do not change the existing |
 * behavior except for adding unnecessary delay in the ftrace handler.
1166 | * |
1167 | * It is safe even when the transition was forced. The ftrace handler |
1168 | * will see a valid ops->func_stack entry thanks to RCU. |
1169 | * |
1170 | * We could even free the NOPs structures. They must be the last entry |
1171 | * in ops->func_stack. Therefore unregister_ftrace_function() is called. |
1172 | * It does the same as klp_synchronize_transition() to make sure that |
1173 | * nobody is inside the ftrace handler once the operation finishes. |
1174 | * |
1175 | * IMPORTANT: It must be called right after removing the replaced patches! |
1176 | */ |
1177 | void klp_discard_nops(struct klp_patch *new_patch) |
1178 | { |
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
1181 | } |
1182 | |
1183 | /* |
1184 | * Remove parts of patches that touch a given kernel module. The list of |
1185 | * patches processed might be limited. When limit is NULL, all patches |
1186 | * will be handled. |
1187 | */ |
1188 | static void klp_cleanup_module_patches_limited(struct module *mod, |
1189 | struct klp_patch *limit) |
1190 | { |
1191 | struct klp_patch *patch; |
1192 | struct klp_object *obj; |
1193 | |
1194 | klp_for_each_patch(patch) { |
1195 | if (patch == limit) |
1196 | break; |
1197 | |
1198 | klp_for_each_object(patch, obj) { |
1199 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) |
1200 | continue; |
1201 | |
1202 | if (patch != klp_transition_patch) |
1203 | klp_pre_unpatch_callback(obj); |
1204 | |
			pr_notice("reverting patch '%s' on unloading module '%s'\n",
1206 | patch->mod->name, obj->mod->name); |
1207 | klp_unpatch_object(obj); |
1208 | |
1209 | klp_post_unpatch_callback(obj); |
1210 | klp_clear_object_relocs(patch, obj); |
1211 | klp_free_object_loaded(obj); |
1212 | break; |
1213 | } |
1214 | } |
1215 | } |
1216 | |
1217 | int klp_module_coming(struct module *mod) |
1218 | { |
1219 | int ret; |
1220 | struct klp_patch *patch; |
1221 | struct klp_object *obj; |
1222 | |
1223 | if (WARN_ON(mod->state != MODULE_STATE_COMING)) |
1224 | return -EINVAL; |
1225 | |
	if (!strcmp(mod->name, "vmlinux")) {
		pr_err("vmlinux.ko: invalid module name\n");
1228 | return -EINVAL; |
1229 | } |
1230 | |
1231 | mutex_lock(&klp_mutex); |
1232 | /* |
1233 | * Each module has to know that klp_module_coming() |
1234 | * has been called. We never know what module will |
1235 | * get patched by a new patch. |
1236 | */ |
1237 | mod->klp_alive = true; |
1238 | |
1239 | klp_for_each_patch(patch) { |
1240 | klp_for_each_object(patch, obj) { |
1241 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) |
1242 | continue; |
1243 | |
1244 | obj->mod = mod; |
1245 | |
1246 | ret = klp_init_object_loaded(patch, obj); |
1247 | if (ret) { |
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
1249 | patch->mod->name, obj->mod->name, ret); |
1250 | goto err; |
1251 | } |
1252 | |
			pr_notice("applying patch '%s' to loading module '%s'\n",
1254 | patch->mod->name, obj->mod->name); |
1255 | |
1256 | ret = klp_pre_patch_callback(obj); |
1257 | if (ret) { |
				pr_warn("pre-patch callback failed for object '%s'\n",
1259 | obj->name); |
1260 | goto err; |
1261 | } |
1262 | |
1263 | ret = klp_patch_object(obj); |
1264 | if (ret) { |
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1266 | patch->mod->name, obj->mod->name, ret); |
1267 | |
1268 | klp_post_unpatch_callback(obj); |
1269 | goto err; |
1270 | } |
1271 | |
1272 | if (patch != klp_transition_patch) |
1273 | klp_post_patch_callback(obj); |
1274 | |
1275 | break; |
1276 | } |
1277 | } |
1278 | |
	mutex_unlock(&klp_mutex);
1280 | |
1281 | return 0; |
1282 | |
1283 | err: |
1284 | /* |
1285 | * If a patch is unsuccessfully applied, return |
1286 | * error to the module loader. |
1287 | */ |
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);
1294 | |
1295 | return ret; |
1296 | } |
1297 | |
1298 | void klp_module_going(struct module *mod) |
1299 | { |
1300 | if (WARN_ON(mod->state != MODULE_STATE_GOING && |
1301 | mod->state != MODULE_STATE_COMING)) |
1302 | return; |
1303 | |
1304 | mutex_lock(&klp_mutex); |
1305 | /* |
1306 | * Each module has to know that klp_module_going() |
1307 | * has been called. We never know what module will |
1308 | * get patched by a new patch. |
1309 | */ |
1310 | mod->klp_alive = false; |
1311 | |
1312 | klp_cleanup_module_patches_limited(mod, NULL); |
1313 | |
	mutex_unlock(&klp_mutex);
1315 | } |
1316 | |
1317 | static int __init klp_init(void) |
1318 | { |
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1320 | if (!klp_root_kobj) |
1321 | return -ENOMEM; |
1322 | |
1323 | return 0; |
1324 | } |
1325 | |
1326 | module_init(klp_init); |
1327 | |