// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines. Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef ELF_COMPAT
#define ELF_COMPAT 0
#endif

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(int)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
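
/*
 * For illustration only (assuming ELF_MIN_ALIGN == 4096, i.e. 0x1000):
 *
 *	ELF_PAGESTART(0x12345)  == 0x12000	(round down to page start)
 *	ELF_PAGEOFFSET(0x12345) == 0x345	(offset within the page)
 *	ELF_PAGEALIGN(0x12345)  == 0x13000	(round up to page boundary)
 */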

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
#ifdef CONFIG_COREDUMP
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
#endif
};

#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))

/*
 * We need to explicitly zero any trailing portion of the page that follows
 * p_filesz when it ends before the page ends (e.g. bss), otherwise this
 * memory will contain the junk from the file that should not be present.
 */
static int padzero(unsigned long address)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(address);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *)address, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) (sp -= len)
#endif
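
/*
 * Illustrative sketch (not from the original source): on a stack that
 * grows down, STACK_ALLOC(sp, 16) lowers sp by 16 bytes and evaluates
 * to the new, lower address, so the 16-byte object lives at
 * [sp, sp + 16). With CONFIG_STACK_GROWSUP it instead returns the old
 * sp and bumps sp past the object. Either way the caller receives the
 * address of the freshly reserved region.
 */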

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long interp_load_addr,
		unsigned long e_entry, unsigned long phdr_addr)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	elf_addr_t flags = 0;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace. In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)
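
	/*
	 * Sketch of the resulting layout (illustrative): mm->saved_auxv
	 * ends up holding consecutive {id, value} pairs, e.g.
	 *
	 *	{ AT_HWCAP, <hwcap bits> }, { AT_PAGESZ, 4096 }, ...,
	 *	{ AT_NULL, 0 }
	 *
	 * which is later copied verbatim onto the new process's stack.
	 */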

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, phdr_addr);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
		flags |= AT_FLAGS_PRESERVE_ARGV0;
	NEW_AUX_ENT(AT_FLAGS, flags);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->have_execfd) {
		NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
	}
#ifdef CONFIG_RSEQ
	NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
	NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
#endif
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif
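
	/*
	 * Illustrative sketch of what the new program sees at its stack
	 * pointer once this function finishes (usual grows-down case):
	 *
	 *	sp ->	argc
	 *		argv[0] ... argv[argc - 1], NULL
	 *		envp[0] ... envp[envc - 1], NULL
	 *		auxv {id, value} pairs, ending with {AT_NULL, 0}
	 *		(the argument/environment strings live higher up,
	 *		 along with the platform strings and AT_RANDOM bytes)
	 */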


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vma = find_extend_vma_locked(mm, bprm->p);
	mmap_write_unlock(mm);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place. */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

/*
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
 * into memory at "addr". (Note that p_filesz is rounded up to the
 * next page, so any extra bytes from the file must be wiped.)
 */
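/*
 * Worked example (hypothetical numbers, 4 KiB pages): a segment with
 * p_vaddr = 0x401234, p_offset = 0x1234, p_filesz = 0x5000 has
 * ELF_PAGEOFFSET(p_vaddr) == 0x234, so size = 0x5234 -> 0x6000 after
 * alignment, and off = 0x1000. The mmap() then places file offset
 * 0x1000 at address 0x401000; this relies on p_vaddr and p_offset
 * being congruent modulo the page size.
 */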
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}

/*
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
 * into memory at "addr". Memory from "p_filesz" through "p_memsz"
 * rounded up to the next page is zeroed.
 */
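/*
 * Illustrative bss case (hypothetical numbers): with p_filesz = 0x1800
 * and p_memsz = 0x3000 on a page-aligned mapping, padzero() clears the
 * tail of the partially filled page (0x1800..0x2000) and vm_brk_flags()
 * below supplies anonymous zero pages for 0x2000..0x3000.
 */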
static unsigned long elf_load(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long zero_start, zero_end;
	unsigned long map_addr;

	if (eppnt->p_filesz) {
		map_addr = elf_map(filep, addr, eppnt, prot, type, total_size);
		if (BAD_ADDR(map_addr))
			return map_addr;
		if (eppnt->p_memsz > eppnt->p_filesz) {
			zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
				eppnt->p_filesz;
			zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
				eppnt->p_memsz;

			/*
			 * Zero the end of the last mapped page but ignore
			 * any errors if the segment isn't writable.
			 */
			if (padzero(zero_start) && (prot & PROT_WRITE))
				return -EFAULT;
		}
	} else {
		map_addr = zero_start = ELF_PAGESTART(addr);
		zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) +
			eppnt->p_memsz;
	}
	if (eppnt->p_memsz > eppnt->p_filesz) {
		/*
		 * Map the last of the segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error;

		zero_start = ELF_PAGEALIGN(zero_start);
		zero_end = ELF_PAGEALIGN(zero_end);

		error = vm_brk_flags(zero_start, zero_end - zero_start,
				     prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			map_addr = error;
	}
	return map_addr;
}


static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr)
{
	elf_addr_t min_addr = -1;
	elf_addr_t max_addr = 0;
	bool pt_load = false;
	int i;

	for (i = 0; i < nr; i++) {
		if (phdr[i].p_type == PT_LOAD) {
			min_addr = min(min_addr, ELF_PAGESTART(phdr[i].p_vaddr));
			max_addr = max(max_addr, phdr[i].p_vaddr + phdr[i].p_memsz);
			pt_load = true;
		}
	}
	return pt_load ? (max_addr - min_addr) : 0;
}
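
/*
 * For example (illustrative): two PT_LOAD segments covering
 * [0x0, 0x1000) and [0x200000, 0x201000) yield a total mapping size of
 * 0x201000, i.e. the whole span including the hole between them.
 */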

static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
{
	ssize_t rv;

	rv = kernel_read(file, buf, len, &pos);
	if (unlikely(rv != len)) {
		return (rv < 0) ? rv : -EIO;
	}
	return 0;
}

static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
{
	unsigned long alignment = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			unsigned long p_align = cmds[i].p_align;

			/* skip non-power of two alignments as invalid */
			if (!is_power_of_2(p_align))
				continue;
			alignment = max(alignment, p_align);
		}
	}

	/* ensure we align to at least one page */
	return ELF_PAGEALIGN(alignment);
}

/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval = -1;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);

out:
	if (retval) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}

#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */

static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
			    bool has_interp, bool is_interp)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;

	return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
}

/* This is much more generalized than the library routine read function,
   so we keep this separate. Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata,
		struct arch_elf_state *arch_state)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE;
			int elf_prot = make_prot(eppnt->p_flags, arch_state,
						 true, true);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_load(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}
		}
	}

	error = load_addr;
out:
	return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries. There is no binary dependent code anywhere else.
 */

static int parse_elf_property(const char *data, size_t *off, size_t datasz,
			      struct arch_elf_state *arch,
			      bool have_prev_type, u32 *prev_type)
{
	size_t o, step;
	const struct gnu_property *pr;
	int ret;

	if (*off == datasz)
		return -ENOENT;

	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
		return -EIO;
	o = *off;
	datasz -= *off;

	if (datasz < sizeof(*pr))
		return -ENOEXEC;
	pr = (const struct gnu_property *)(data + o);
	o += sizeof(*pr);
	datasz -= sizeof(*pr);

	if (pr->pr_datasz > datasz)
		return -ENOEXEC;

	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
	if (step > datasz)
		return -ENOEXEC;

	/* Properties are supposed to be unique and sorted on pr_type: */
	if (have_prev_type && pr->pr_type <= *prev_type)
		return -ENOEXEC;
	*prev_type = pr->pr_type;

	ret = arch_parse_elf_property(pr->pr_type, data + o,
				      pr->pr_datasz, ELF_COMPAT, arch);
	if (ret)
		return ret;

	*off = o + step;
	return 0;
}

#define NOTE_DATA_SZ SZ_1K
#define GNU_PROPERTY_TYPE_0_NAME "GNU"
#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
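
/*
 * Rough sketch of the note this parser walks (illustrative, not from
 * the original source): a PT_GNU_PROPERTY segment points at an ELF note
 *
 *	Elf_Nhdr { n_namesz = 4, n_descsz, n_type = NT_GNU_PROPERTY_TYPE_0 }
 *	"GNU\0"
 *	desc: a sequence of { u32 pr_type; u32 pr_datasz; data... }
 *	entries, each padded to ELF_GNU_PROPERTY_ALIGN and sorted by
 *	pr_type.
 */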

static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
				struct arch_elf_state *arch)
{
	union {
		struct elf_note nhdr;
		char data[NOTE_DATA_SZ];
	} note;
	loff_t pos;
	ssize_t n;
	size_t off, datasz;
	int ret;
	bool have_prev_type;
	u32 prev_type;

	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
		return 0;

	/* load_elf_binary() shouldn't call us unless this is true... */
	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
		return -ENOEXEC;

	/* If the properties are crazy large, that's too bad (for now): */
	if (phdr->p_filesz > sizeof(note))
		return -ENOEXEC;

	pos = phdr->p_offset;
	n = kernel_read(f, &note, phdr->p_filesz, &pos);

	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
		return -EIO;

	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
	    strncmp(note.data + sizeof(note.nhdr),
		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
		return -ENOEXEC;

	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
		       ELF_GNU_PROPERTY_ALIGN);
	if (off > n)
		return -ENOEXEC;

	if (note.nhdr.n_descsz > n - off)
		return -ENOEXEC;
	datasz = off + note.nhdr.n_descsz;

	have_prev_type = false;
	do {
		ret = parse_elf_property(note.data, &off, datasz, arch,
					 have_prev_type, &prev_type);
		have_prev_type = true;
	} while (!ret);

	return ret == -ENOENT ? 0 : ret;
}

static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_bias = 0, phdr_addr = 0;
	int first_pt_load = 1;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	struct elf_phdr *elf_property_phdata = NULL;
	unsigned long elf_brk;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct elfhdr *interp_elf_ex = NULL;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
			elf_property_phdata = elf_ppnt;
			continue;
		}

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
		if (!interp_elf_ex) {
			retval = -ENOMEM;
			goto out_free_file;
		}

		/* Get the exec headers */
		retval = elf_read(interpreter, interp_elf_ex,
				  sizeof(*interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(interp_elf_ex) ||
		    elf_check_fdpic(interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_property_phdata = NULL;
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_GNU_PROPERTY:
				elf_property_phdata = elf_ppnt;
				break;

			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	retval = parse_elf_properties(interpreter ?: bprm->file,
				      elf_property_phdata, &arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = begin_new_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality. */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);

	/* Do this so that we can load the interpreter, if need be. We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;
		unsigned long alignment;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
				     !!interpreter, false);

		elf_flags = MAP_PRIVATE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * The first time through the loop, first_pt_load is true:
		 * layout will be calculated. Once set, use MAP_FIXED since
		 * we know we've already safely mapped the entire region with
		 * MAP_FIXED_NOREPLACE in the once-per-binary logic following.
		 */
		if (!first_pt_load) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_EXEC) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_EXEC binaries. No special handling
			 * is needed.
			 */
			elf_flags |= MAP_FIXED_NOREPLACE;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers.
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
				if (alignment)
					load_bias &= ~(alignment - 1);
				elf_flags |= MAP_FIXED_NOREPLACE;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);
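
			/*
			 * Hypothetical example: if the randomized base came
			 * out as 0x7f0000001000 and the first PT_LOAD has
			 * p_vaddr == 0x3000, load_bias becomes
			 * 0x7effffffe000, so mapping at load_bias + vaddr
			 * lands the segment back at 0x7f0000001000.
			 */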

			/*
			 * Calculate the entire size of the ELF mapping
			 * (total_size), used for the initial mapping,
			 * due to load_addr_set which is set to true later
			 * once the initial mapping is performed.
			 *
			 * Note that this is only sensible when the LOAD
			 * segments are contiguous (or overlapping). If
			 * used for LOADs that are far apart, this would
			 * cause the holes between LOADs to be mapped,
			 * running the risk of having the mapping fail,
			 * as it would be larger than the ELF file itself.
			 *
			 * As a result, only ET_DYN does this, since
			 * some ET_EXEC (e.g. ia64) may have large virtual
			 * memory holes between LOADs.
			 *
			 */
			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR_VALUE(error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (first_pt_load) {
			first_pt_load = 0;
			if (elf_ex->e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				reloc_func_desc = load_bias;
			}
		}

		/*
		 * Figure out which segment in the file contains the Program
		 * Header table, and map to the associated memory address.
		 */
		if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
		    elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
			phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
				    elf_ppnt->p_vaddr;
		}

		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	e_entry = elf_ex->e_entry + load_bias;
	phdr_addr += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	current->mm->start_brk = current->mm->brk = ELF_PAGEALIGN(elf_brk);

	if (interpreter) {
		elf_entry = load_elf_interp(interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata,
					    &arch_state);
		if (!IS_ERR_VALUE(elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += interp_elf_ex->e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR_VALUE(elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);

		kfree(interp_elf_ex);
		kfree(interp_elf_phdata);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = ARCH_SETUP_ADDITIONAL_PAGES(bprm, elf_ex, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
				   e_entry, phdr_addr);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example. In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself. This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	START_THREAD(elf_ex, regs, elf_entry, bprm->p);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_ex);
	kfree(interp_elf_phdata);
out_free_file:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
	if (retval < 0)
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
	if (retval < 0)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = elf_load(file, ELF_PAGESTART(eppnt->p_vaddr),
			eppnt,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE,
			0);

	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
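
/*
 * For instance (illustrative): a note named "CORE" (5 bytes with the
 * NUL, padded to 8) with a 336-byte descriptor occupies
 * 12 + 8 + 336 == 356 bytes, since the elf_note header is 12 bytes and
 * both the name and the descriptor are padded to 4-byte boundaries.
 */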

static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}

static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 4;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus_common *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader. It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;
	unsigned int state;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	state = READ_ONCE(p->__state);
	i = state ? ffz(~state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	get_task_comm(psinfo->pr_fname, p);

	return 0;
}

static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}

static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	copy_siginfo_to_external(csigdata, siginfo);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}

#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
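/*
 * Illustrative instance of the layout above (hypothetical values):
 *
 *	count = 2, page_size = PAGE_SIZE
 *	{ start, end, file_ofs } for the first mapping
 *	{ start, end, file_ofs } for the second mapping
 *	"/lib/libc.so.6\0/bin/app\0"
 */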
1579 | static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm) |
1580 | { |
1581 | unsigned count, size, names_ofs, remaining, n; |
1582 | user_long_t *data; |
1583 | user_long_t *start_end_ofs; |
1584 | char *name_base, *name_curpos; |
1585 | int i; |
1586 | |
1587 | /* *Estimated* file count and total data size needed */ |
1588 | count = cprm->vma_count; |
1589 | if (count > UINT_MAX / 64) |
1590 | return -EINVAL; |
1591 | size = count * 64; |
1592 | |
1593 | names_ofs = (2 + 3 * count) * sizeof(data[0]); |
1594 | alloc: |
1595 | if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ |
1596 | return -EINVAL; |
1597 | size = round_up(size, PAGE_SIZE); |
1598 | /* |
1599 | * "size" can be 0 here legitimately. |
1600 | * Let it ENOMEM and omit NT_FILE section which will be empty anyway. |
1601 | */ |
1602 | data = kvmalloc(size, GFP_KERNEL); |
1603 | if (ZERO_OR_NULL_PTR(data)) |
1604 | return -ENOMEM; |
1605 | |
1606 | start_end_ofs = data + 2; |
1607 | name_base = name_curpos = ((char *)data) + names_ofs; |
1608 | remaining = size - names_ofs; |
1609 | count = 0; |
1610 | for (i = 0; i < cprm->vma_count; i++) { |
1611 | struct core_vma_metadata *m = &cprm->vma_meta[i]; |
1612 | struct file *file; |
1613 | const char *filename; |
1614 | |
1615 | file = m->file; |
1616 | if (!file) |
1617 | continue; |
1618 | filename = file_path(file, name_curpos, remaining); |
1619 | if (IS_ERR(ptr: filename)) { |
1620 | if (PTR_ERR(ptr: filename) == -ENAMETOOLONG) { |
1621 | kvfree(addr: data); |
1622 | size = size * 5 / 4; |
1623 | goto alloc; |
1624 | } |
1625 | continue; |
1626 | } |
1627 | |
1628 | /* file_path() fills at the end, move name down */ |
1629 | /* n = strlen(filename) + 1: */ |
1630 | n = (name_curpos + remaining) - filename; |
1631 | remaining = filename - name_curpos; |
1632 | memmove(name_curpos, filename, n); |
1633 | name_curpos += n; |
1634 | |
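		/*
		 * m->pgoff is in PAGE_SIZE units, matching the page_size
		 * value stored in data[1] below.
		 */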
1635 | *start_end_ofs++ = m->start; |
1636 | *start_end_ofs++ = m->end; |
1637 | *start_end_ofs++ = m->pgoff; |
1638 | count++; |
1639 | } |
1640 | |
1641 | /* Now we know exact count of files, can store it */ |
1642 | data[0] = count; |
1643 | data[1] = PAGE_SIZE; |
1644 | /* |
1645 | * Count usually is less than mm->map_count, |
1646 | * we need to move filenames down. |
1647 | */ |
1648 | n = cprm->vma_count - count; |
1649 | if (n != 0) { |
1650 | unsigned shift_bytes = n * 3 * sizeof(data[0]); |
1651 | memmove(name_base - shift_bytes, name_base, |
1652 | name_curpos - name_base); |
1653 | name_curpos -= shift_bytes; |
1654 | } |
1655 | |
1656 | size = name_curpos - (char *)data; |
	fill_note(note, "CORE", NT_FILE, size, data);
1658 | return 0; |
1659 | } |
1660 | |
1661 | #include <linux/regset.h> |
1662 | |
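/*
 * Per-thread note bookkeeping.  notes[0] is always NT_PRSTATUS; the
 * remaining thread_notes - 1 slots hold any further per-thread notes
 * (NT_PRFPREG, other regsets, ...).
 */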
1663 | struct elf_thread_core_info { |
1664 | struct elf_thread_core_info *next; |
1665 | struct task_struct *task; |
1666 | struct elf_prstatus prstatus; |
1667 | struct memelfnote notes[]; |
1668 | }; |
1669 | |
1670 | struct elf_note_info { |
1671 | struct elf_thread_core_info *thread; |
1672 | struct memelfnote psinfo; |
1673 | struct memelfnote signote; |
1674 | struct memelfnote auxv; |
1675 | struct memelfnote files; |
1676 | user_siginfo_t csigdata; |
1677 | size_t size; |
1678 | int thread_notes; |
1679 | }; |
1680 | |
1681 | #ifdef CORE_DUMP_USE_REGSET |
1682 | /* |
1683 | * When a regset has a writeback hook, we call it on each thread before |
1684 | * dumping user memory. On register window machines, this makes sure the |
1685 | * user memory backing the register data is up to date before we read it. |
1686 | */ |
1687 | static void do_thread_regset_writeback(struct task_struct *task, |
1688 | const struct user_regset *regset) |
1689 | { |
1690 | if (regset->writeback) |
1691 | regset->writeback(task, regset, 1); |
1692 | } |
1693 | |
1694 | #ifndef PRSTATUS_SIZE |
1695 | #define PRSTATUS_SIZE sizeof(struct elf_prstatus) |
1696 | #endif |
1697 | |
1698 | #ifndef SET_PR_FPVALID |
1699 | #define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1) |
1700 | #endif |
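
/*
 * Architectures whose compat prstatus layout differs from the native
 * one (e.g. 32-bit dumps written by a 64-bit kernel) can override the
 * two defaults above.
 */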
1701 | |
1702 | static int fill_thread_core_info(struct elf_thread_core_info *t, |
1703 | const struct user_regset_view *view, |
1704 | long signr, struct elf_note_info *info) |
1705 | { |
1706 | unsigned int note_iter, view_iter; |
1707 | |
1708 | /* |
1709 | * NT_PRSTATUS is the one special case, because the regset data |
1710 | * goes into the pr_reg field inside the note contents, rather |
1711 | * than being the whole note contents. We fill the regset in here. |
1712 | * We assume that regset 0 is NT_PRSTATUS. |
1713 | */ |
	fill_prstatus(&t->prstatus.common, t->task, signr);
	regset_get(t->task, &view->regsets[0],
		   sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE, &t->prstatus);
	info->size += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);
1723 | |
1724 | /* |
1725 | * Each other regset might generate a note too. For each regset |
1726 | * that has no core_note_type or is inactive, skip it. |
1727 | */ |
1728 | note_iter = 1; |
1729 | for (view_iter = 1; view_iter < view->n; ++view_iter) { |
1730 | const struct user_regset *regset = &view->regsets[view_iter]; |
1731 | int note_type = regset->core_note_type; |
1732 | bool is_fpreg = note_type == NT_PRFPREG; |
1733 | void *data; |
1734 | int ret; |
1735 | |
		do_thread_regset_writeback(t->task, regset);
		if (!note_type) /* not for coredumps */
1738 | continue; |
1739 | if (regset->active && regset->active(t->task, regset) <= 0) |
1740 | continue; |
1741 | |
		ret = regset_get_alloc(t->task, regset, ~0U, &data);
1743 | if (ret < 0) |
1744 | continue; |
1745 | |
1746 | if (WARN_ON_ONCE(note_iter >= info->thread_notes)) |
1747 | break; |
1748 | |
1749 | if (is_fpreg) |
1750 | SET_PR_FPVALID(&t->prstatus); |
1751 | |
		fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
			  note_type, ret, data);

		info->size += notesize(&t->notes[note_iter]);
1756 | note_iter++; |
1757 | } |
1758 | |
1759 | return 1; |
1760 | } |
1761 | #else |
1762 | static int fill_thread_core_info(struct elf_thread_core_info *t, |
1763 | const struct user_regset_view *view, |
1764 | long signr, struct elf_note_info *info) |
1765 | { |
1766 | struct task_struct *p = t->task; |
1767 | elf_fpregset_t *fpu; |
1768 | |
1769 | fill_prstatus(&t->prstatus.common, p, signr); |
1770 | elf_core_copy_task_regs(p, &t->prstatus.pr_reg); |
1771 | |
	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1773 | &(t->prstatus)); |
1774 | info->size += notesize(&t->notes[0]); |
1775 | |
1776 | fpu = kzalloc(sizeof(elf_fpregset_t), GFP_KERNEL); |
1777 | if (!fpu || !elf_core_copy_task_fpregs(p, fpu)) { |
1778 | kfree(fpu); |
1779 | return 1; |
1780 | } |
1781 | |
1782 | t->prstatus.pr_fpvalid = 1; |
	fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1784 | info->size += notesize(&t->notes[1]); |
1785 | |
1786 | return 1; |
1787 | } |
1788 | #endif |
1789 | |
1790 | static int fill_note_info(struct elfhdr *elf, int phdrs, |
1791 | struct elf_note_info *info, |
1792 | struct coredump_params *cprm) |
1793 | { |
1794 | struct task_struct *dump_task = current; |
1795 | const struct user_regset_view *view; |
1796 | struct elf_thread_core_info *t; |
1797 | struct elf_prpsinfo *psinfo; |
1798 | struct core_thread *ct; |
1799 | |
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		return 0;
	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1804 | |
1805 | #ifdef CORE_DUMP_USE_REGSET |
	view = task_user_regset_view(dump_task);
1807 | |
1808 | /* |
1809 | * Figure out how many notes we're going to need for each thread. |
1810 | */ |
1811 | info->thread_notes = 0; |
1812 | for (int i = 0; i < view->n; ++i) |
1813 | if (view->regsets[i].core_note_type != 0) |
1814 | ++info->thread_notes; |
1815 | |
1816 | /* |
	 * Sanity check.  We rely on regset 0 being NT_PRSTATUS,
1818 | * since it is our one special case. |
1819 | */ |
1820 | if (unlikely(info->thread_notes == 0) || |
1821 | unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) { |
1822 | WARN_ON(1); |
1823 | return 0; |
1824 | } |
1825 | |
1826 | /* |
1827 | * Initialize the ELF file header. |
1828 | */ |
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);
1831 | #else |
1832 | view = NULL; |
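	/* Slot 0 is NT_PRSTATUS; slot 1 holds NT_PRFPREG when present. */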
1833 | info->thread_notes = 2; |
1834 | fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS); |
1835 | #endif |
1836 | |
1837 | /* |
1838 | * Allocate a structure for each thread. |
1839 | */ |
1840 | info->thread = kzalloc(offsetof(struct elf_thread_core_info, |
1841 | notes[info->thread_notes]), |
1842 | GFP_KERNEL); |
1843 | if (unlikely(!info->thread)) |
1844 | return 0; |
1845 | |
1846 | info->thread->task = dump_task; |
1847 | for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) { |
1848 | t = kzalloc(offsetof(struct elf_thread_core_info, |
1849 | notes[info->thread_notes]), |
1850 | GFP_KERNEL); |
1851 | if (unlikely(!t)) |
1852 | return 0; |
1853 | |
1854 | t->task = ct->task; |
1855 | t->next = info->thread->next; |
1856 | info->thread->next = t; |
1857 | } |
1858 | |
1859 | /* |
1860 | * Now fill in each thread's information. |
1861 | */ |
1862 | for (t = info->thread; t != NULL; t = t->next) |
		if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info))
1864 | return 0; |
1865 | |
1866 | /* |
1867 | * Fill in the two process-wide notes. |
1868 | */ |
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	if (fill_files_note(&info->files, cprm) == 0)
		info->size += notesize(&info->files);
1880 | |
1881 | return 1; |
1882 | } |
1883 | |
1884 | /* |
1885 | * Write all the notes for each thread. When writing the first thread, the |
1886 | * process-wide notes are interleaved after the first thread-specific note. |
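 * The resulting order is: thread 0's NT_PRSTATUS; then NT_PRPSINFO,
 * NT_SIGINFO, NT_AUXV and (if present) NT_FILE; then thread 0's
 * remaining regset notes; then each further thread's notes in turn.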
1887 | */ |
1888 | static int write_note_info(struct elf_note_info *info, |
1889 | struct coredump_params *cprm) |
1890 | { |
1891 | bool first = true; |
1892 | struct elf_thread_core_info *t = info->thread; |
1893 | |
1894 | do { |
1895 | int i; |
1896 | |
		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		if (first && info->files.data &&
		    !writenote(&info->files, cprm))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;
1914 | |
1915 | first = false; |
1916 | t = t->next; |
1917 | } while (t); |
1918 | |
1919 | return 1; |
1920 | } |
1921 | |
1922 | static void free_note_info(struct elf_note_info *info) |
1923 | { |
1924 | struct elf_thread_core_info *threads = info->thread; |
1925 | while (threads) { |
1926 | unsigned int i; |
1927 | struct elf_thread_core_info *t = threads; |
1928 | threads = t->next; |
1929 | WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus); |
1930 | for (i = 1; i < info->thread_notes; ++i) |
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	kvfree(info->files.data);
1936 | } |
1937 | |
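/*
 * When segs exceeds PN_XNUM (0xffff), e_phnum is set to PN_XNUM and
 * the real count is carried in the sh_info field of section header 0,
 * which this helper builds.
 */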
1938 | static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, |
1939 | elf_addr_t e_shoff, int segs) |
1940 | { |
1941 | elf->e_shoff = e_shoff; |
1942 | elf->e_shentsize = sizeof(*shdr4extnum); |
1943 | elf->e_shnum = 1; |
1944 | elf->e_shstrndx = SHN_UNDEF; |
1945 | |
1946 | memset(shdr4extnum, 0, sizeof(*shdr4extnum)); |
1947 | |
1948 | shdr4extnum->sh_type = SHT_NULL; |
1949 | shdr4extnum->sh_size = elf->e_shnum; |
1950 | shdr4extnum->sh_link = elf->e_shstrndx; |
1951 | shdr4extnum->sh_info = segs; |
1952 | } |
1953 | |
1954 | /* |
1955 | * Actual dumper |
1956 | * |
1957 | * This is a two-pass process; first we find the offsets of the bits, |
1958 | * and then they are actually written out. If we run out of core limit |
1959 | * we just truncate. |
1960 | */ |
1961 | static int elf_core_dump(struct coredump_params *cprm) |
1962 | { |
1963 | int has_dumped = 0; |
1964 | int segs, i; |
1965 | struct elfhdr elf; |
1966 | loff_t offset = 0, dataoff; |
1967 | struct elf_note_info info = { }; |
1968 | struct elf_phdr *phdr4note = NULL; |
1969 | struct elf_shdr *shdr4extnum = NULL; |
1970 | Elf_Half e_phnum; |
1971 | elf_addr_t e_shoff; |
1972 | |
1973 | /* |
1974 | * The number of segs are recored into ELF header as 16bit value. |
1975 | * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. |
1976 | */ |
1977 | segs = cprm->vma_count + elf_core_extra_phdrs(cprm); |
1978 | |
1979 | /* for notes section */ |
1980 | segs++; |
1981 | |
1982 | /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid |
	 * this, the kernel supports extended numbering. Have a look at
1984 | * include/linux/elf.h for further information. */ |
1985 | e_phnum = segs > PN_XNUM ? PN_XNUM : segs; |
1986 | |
1987 | /* |
1988 | * Collect all the non-memory information about the process for the |
1989 | * notes. This also sets up the file header. |
1990 | */ |
	if (!fill_note_info(&elf, e_phnum, &info, cprm))
1992 | goto end_coredump; |
1993 | |
1994 | has_dumped = 1; |
1995 | |
1996 | offset += sizeof(elf); /* ELF header */ |
1997 | offset += segs * sizeof(struct elf_phdr); /* Program headers */ |
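	/*
	 * Overall layout: ELF header, program headers, notes, then
	 * page-aligned segment data, with the extended-numbering section
	 * header (if any) at the very end.
	 */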
1998 | |
1999 | /* Write notes phdr entry */ |
2000 | { |
2001 | size_t sz = info.size; |
2002 | |
2003 | /* For cell spufs */ |
2004 | sz += elf_coredump_extra_notes_size(); |
2005 | |
		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
2011 | offset += sz; |
2012 | } |
2013 | |
2014 | dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); |
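	/*
	 * dataoff is where the page-aligned segment data will start;
	 * everything from here to e_shoff is memory contents plus any
	 * arch-specific extra data.
	 */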
2015 | |
2016 | offset += cprm->vma_data_size; |
2017 | offset += elf_core_extra_data_size(cprm); |
2018 | e_shoff = offset; |
2019 | |
2020 | if (e_phnum == PN_XNUM) { |
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
2025 | } |
2026 | |
2027 | offset = dataoff; |
2028 | |
	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2033 | goto end_coredump; |
2034 | |
2035 | /* Write program headers for segments dump */ |
2036 | for (i = 0; i < cprm->vma_count; i++) { |
2037 | struct core_vma_metadata *meta = cprm->vma_meta + i; |
2038 | struct elf_phdr phdr; |
2039 | |
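		/*
		 * p_filesz comes from dump_size and may be smaller than
		 * p_memsz (even zero) when the VMA's contents were
		 * filtered out of the dump.
		 */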
2040 | phdr.p_type = PT_LOAD; |
2041 | phdr.p_offset = offset; |
2042 | phdr.p_vaddr = meta->start; |
2043 | phdr.p_paddr = 0; |
2044 | phdr.p_filesz = meta->dump_size; |
2045 | phdr.p_memsz = meta->end - meta->start; |
2046 | offset += phdr.p_filesz; |
2047 | phdr.p_flags = 0; |
2048 | if (meta->flags & VM_READ) |
2049 | phdr.p_flags |= PF_R; |
2050 | if (meta->flags & VM_WRITE) |
2051 | phdr.p_flags |= PF_W; |
2052 | if (meta->flags & VM_EXEC) |
2053 | phdr.p_flags |= PF_X; |
2054 | phdr.p_align = ELF_EXEC_PAGESIZE; |
2055 | |
		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2057 | goto end_coredump; |
2058 | } |
2059 | |
2060 | if (!elf_core_write_extra_phdrs(cprm, offset)) |
2061 | goto end_coredump; |
2062 | |
2063 | /* write out the notes section */ |
	if (!write_note_info(&info, cprm))
2065 | goto end_coredump; |
2066 | |
2067 | /* For cell spufs */ |
2068 | if (elf_coredump_extra_notes_write(cprm)) |
2069 | goto end_coredump; |
2070 | |
2071 | /* Align to page */ |
	dump_skip_to(cprm, dataoff);
2073 | |
2074 | for (i = 0; i < cprm->vma_count; i++) { |
2075 | struct core_vma_metadata *meta = cprm->vma_meta + i; |
2076 | |
		if (!dump_user_range(cprm, meta->start, meta->dump_size))
2078 | goto end_coredump; |
2079 | } |
2080 | |
2081 | if (!elf_core_write_extra_data(cprm)) |
2082 | goto end_coredump; |
2083 | |
2084 | if (e_phnum == PN_XNUM) { |
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2086 | goto end_coredump; |
2087 | } |
2088 | |
2089 | end_coredump: |
	free_note_info(&info);
	kfree(shdr4extnum);
	kfree(phdr4note);
2093 | return has_dumped; |
2094 | } |
2095 | |
2096 | #endif /* CONFIG_ELF_CORE */ |
2097 | |
2098 | static int __init init_elf_binfmt(void) |
2099 | { |
	register_binfmt(&elf_format);
2101 | return 0; |
2102 | } |
2103 | |
2104 | static void __exit exit_elf_binfmt(void) |
2105 | { |
	/* Remove the ELF loader. */
2107 | unregister_binfmt(&elf_format); |
2108 | } |
2109 | |
2110 | core_initcall(init_elf_binfmt); |
2111 | module_exit(exit_elf_binfmt); |
2112 | |
2113 | #ifdef CONFIG_BINFMT_ELF_KUNIT_TEST |
2114 | #include "binfmt_elf_test.c" |
2115 | #endif |
2116 | |