/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/resume_user_mode.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>
#include <asm/syscalls.h>

#include "signal-common.h"

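/*
 * Scalar FP save/restore is performed through these function pointers.
 * They are assigned once at boot by signal_setup() below: either the
 * hardware paths ({save,restore}_hw_fp_context) or the software fallbacks
 * (copy_fp_{to,from}_sigcontext), depending on whether an FPU is present.
 */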
static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

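/*
 * Layout of the classic signal frame pushed onto the user stack by
 * setup_frame(). sf_ass provides the argument save space required by the
 * o32 calling convention, and sf_pad previously held the signal return
 * trampoline (now provided via the VDSO, see handle_signal() below).
 * Extended context, if any, is appended at sf_extcontext.
 */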
struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */

	/* Matches struct ucontext from its uc_mcontext field onwards */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
	unsigned long long sf_extcontext[];
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int copy_fp_to_sigcontext(void __user *sc)
{
	return 0;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	return 0;
}

#endif /* !CONFIG_MIPS_FP_SUPPORT */

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _restore_fp_context(fpregs, csr);
}

/*
 * Extended context handling.
 */

static inline void __user *sc_to_extcontext(void __user *sc)
{
	struct ucontext __user *uc;

	/*
	 * We can just pretend the sigcontext is always embedded in a struct
	 * ucontext here, because the offset from sigcontext to extended
	 * context is the same in the struct sigframe case.
	 */
	uc = container_of(sc, struct ucontext, uc_mcontext);
	return &uc->uc_extcontext;
}

#ifdef CONFIG_CPU_HAS_MSA

static int save_msa_extcontext(void __user *buf)
{
	struct msa_extcontext __user *msa = buf;
	uint64_t val;
	int i, err;

	if (!thread_msa_context_live())
		return 0;

	/*
	 * Ensure that we can't lose the live MSA context between checking
	 * for it & writing it to memory.
	 */
	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be saved to kernel memory
		 * and then copied to user memory. The save to kernel memory
		 * should already have been done when handling scalar FP
		 * context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		err = __put_user(read_msa_csr(), &msa->csr);
		err |= _save_msa_all_upper(&msa->wr);

		preempt_enable();
	} else {
		preempt_enable();

		err = __put_user(current->thread.fpu.msacsr, &msa->csr);

		for (i = 0; i < NUM_FPU_REGS; i++) {
			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
			err |= __put_user(val, &msa->wr[i]);
		}
	}

	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
	err |= __put_user(sizeof(*msa), &msa->ext.size);

	return err ? -EFAULT : sizeof(*msa);
}

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	struct msa_extcontext __user *msa = buf;
	unsigned long long val;
	unsigned int csr;
	int i, err;

	if (size != sizeof(*msa))
		return -EINVAL;

	err = get_user(csr, &msa->csr);
	if (err)
		return err;

	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be copied to kernel
		 * memory and later loaded to registers. The same is true of
		 * scalar FP context, so FPU & MSA should have already been
		 * disabled whilst handling scalar FP context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		write_msa_csr(csr);
		err |= _restore_msa_all_upper(&msa->wr);
		preempt_enable();
	} else {
		preempt_enable();

		current->thread.fpu.msacsr = csr;

		for (i = 0; i < NUM_FPU_REGS; i++) {
			err |= __get_user(val, &msa->wr[i]);
			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
		}
	}

	return err;
}

#else /* !CONFIG_CPU_HAS_MSA */

static int save_msa_extcontext(void __user *buf)
{
	return 0;
}

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	return SIGSYS;
}

#endif /* !CONFIG_CPU_HAS_MSA */

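/*
 * Write all live extended context records (currently only MSA) to buf,
 * terminated by an END_EXTCONTEXT_MAGIC marker. Returns the number of
 * bytes written, 0 if there was nothing to save, or -EFAULT on error.
 */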
static int save_extcontext(void __user *buf)
{
	int sz;

	sz = save_msa_extcontext(buf);
	if (sz < 0)
		return sz;
	buf += sz;

	/* If no context was saved then trivially return */
	if (!sz)
		return 0;

	/* Write the end marker */
	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
		return -EFAULT;

	sz += sizeof(((struct extcontext *)NULL)->magic);
	return sz;
}

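/*
 * Walk the chain of extended context records written by save_extcontext(),
 * dispatching on each record's magic value until the end marker is found.
 * An unrecognised record causes -EINVAL rather than being skipped.
 */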
static int restore_extcontext(void __user *buf)
{
	struct extcontext ext;
	int err;

	while (1) {
		err = __get_user(ext.magic, (unsigned int *)buf);
		if (err)
			return err;

		if (ext.magic == END_EXTCONTEXT_MAGIC)
			return 0;

		err = __get_user(ext.size, (unsigned int *)(buf
			+ offsetof(struct extcontext, size)));
		if (err)
			return err;

		switch (ext.magic) {
		case MSA_EXTCONTEXT_MAGIC:
			err = restore_msa_extcontext(buf, ext.size);
			break;

		default:
			err = -EINVAL;
			break;
		}

		if (err)
			return err;

		buf += ext.size;
	}
}

/*
 * Helper routines
 */
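/*
 * Save the scalar FP state, then any extended context, to the sigcontext.
 * If the initial save faults (e.g. because the stack page is not yet
 * mapped), the sigcontext is touched with __put_user() to fault it in and
 * the save is retried; presumably only a genuinely bad sigcontext still
 * fails after that.
 */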
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used, ext_sz;
	int err;

	used = used_math() ? USED_FP : 0;
	if (!used)
		goto fp_done;

	if (!test_thread_flag(TIF_32BIT_FPREGS))
		used |= USED_FR1;
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		used |= USED_HYBRID_FPRS;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			return err;	/* really bad sigcontext */
	}

fp_done:
	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
	if (err < 0)
		return err;
	used |= ext_sz ? USED_EXTCONTEXT : 0;

	return __put_user(used, used_math);
}

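/*
 * Counterpart of protected_save_fp_context(): restore scalar FP and any
 * extended context from the sigcontext, using the same touch-and-retry
 * scheme for faulting pages. Returns a negative errno on failure, or a
 * signal number (SIGFPE) if fpcsr_pending() found exceptions left pending
 * by the handler.
 */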
int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig = 0, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used & USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !(used & USED_FP))
		lose_fpu(0);
	if (err)
		return err;
	if (!(used & USED_FP))
		goto fp_done;

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

fp_done:
	if (!err && (used & USED_EXTCONTEXT))
		err = restore_extcontext(sc_to_extcontext(sc));

	return err ?: sig;
}

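/*
 * Populate the sigcontext on the user stack from the trapped register
 * state: PC, general purpose registers, HI/LO (plus the DSP accumulators
 * and control register where present) and finally the FPU state.
 */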
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}

static size_t extcontext_max_size(void)
{
	size_t sz = 0;

	/*
	 * The assumption here is that between this point & the point at which
	 * the extended context is saved the size of the context should only
	 * ever be able to shrink (if the task is preempted), but never grow.
	 * That is, what this function returns is an upper bound on the size of
	 * the extended context for the current task at the current time.
	 */

	if (thread_msa_context_live())
		sz += sizeof(struct msa_extcontext);

	/* If any context is saved then we'll append the end marker */
	if (sz)
		sz += sizeof(((struct extcontext *)NULL)->magic);

	return sz;
}

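/*
 * Check whether the FCSR written back by a signal handler has any exception
 * cause bit set whose corresponding enable bit is also set (or the
 * unimplemented operation cause bit). In the MIPS FCSR the cause bits sit
 * 5 bits above the enable bits, hence the << 5 below. If such an exception
 * is pending, clear it in the sigcontext and report SIGFPE, since restoring
 * it to the hardware would immediately retrigger the exception.
 */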
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

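/*
 * Inverse of setup_sigcontext(): reload the trapped register state from
 * the sigcontext at sigreturn time. Returns a negative errno on fault, or
 * the signal propagated from protected_restore_fp_context().
 */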
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}

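/*
 * Mask applied to the computed signal frame address: ALMASK gives normal
 * stack alignment, while the icache refill workaround additionally aligns
 * the frame to an icache line boundary.
 */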
#ifdef CONFIG_WAR_ICACHE_REFILLS
#define SIGMASK		~(cpu_icache_line_size()-1)
#else
#define SIGMASK		ALMASK
#endif

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Leave space for potential extended context */
	frame_size += extcontext_max_size();

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
		return (void __user __force *)(-1UL);

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16-bytes before the next lowest
	 * 16 byte boundary. Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) & SIGMASK);
}


/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(void)
{
	struct sigframe __user *frame;
	struct pt_regs *regs;
	sigset_t blocked;
	int sig;

	regs = current_pt_regs();
	frame = (struct sigframe __user *)regs->regs[29];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(void)
{
	struct rt_sigframe __user *frame;
	struct pt_regs *regs;
	sigset_t set;
	int sig;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[29];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(frame, sizeof (*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 * a0 = signal number
	 * a1 = 0 (should be cause)
	 * a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(frame, sizeof (*frame)))
		return -EFAULT;

	/* Create siginfo. */
	if (copy_siginfo_to_user(&frame->rs_info, &ksig->info))
		return -EFAULT;

	/* Create the ucontext. */
	if (__put_user(0, &frame->rs_uc.uc_flags))
		return -EFAULT;
	if (__put_user(NULL, &frame->rs_uc.uc_link))
		return -EFAULT;
	if (__save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]))
		return -EFAULT;
	if (setup_sigcontext(regs, &frame->rs_uc.uc_mcontext))
		return -EFAULT;
	if (__copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)))
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 * a0 = signal number
	 * a1 = 0 (should be cause)
	 * a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame = setup_frame,
#endif
	.setup_rt_frame = setup_rt_frame,
	.restart = __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),

	.vdso = &vdso_image,
};

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	/*
	 * If we were emulating a delay slot instruction, exit that frame such
	 * that addresses in the sigframe are as expected for userland and we
	 * don't have a problem if we reuse the thread's frame for an
	 * instruction within the signal handler.
	 */
	dsemul_thread_rollback(regs);

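	/*
	 * A non-zero regs[0] means we are returning from a syscall; it holds
	 * the syscall number for a possible restart. To restart, v0 (the
	 * syscall number) and a3 (saved across the syscall in regs[26]) are
	 * reinstated and cp0_epc is wound back 4 bytes so the syscall
	 * instruction is re-executed once the handler returns.
	 */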
	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			fallthrough;
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	rseq_signal_deliver(ksig, regs);

	if (sig_uses_siginfo(&ksig->ka, abi))
		ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
				       ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal. */
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	/* deal with pending signal delivery */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME)
		resume_user_mode_work(regs);

	user_enter();
}

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
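/*
 * On SMP the boot-time cpu_has_fpu check made in signal_setup() cannot be
 * relied upon for every CPU, so these wrappers defer the choice between
 * the hardware path and the software fallback to each invocation, using
 * raw_cpu_has_fpu on the current CPU.
 */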
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

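/*
 * One-time initialisation: verify the frame layout invariant relied upon
 * by sc_to_extcontext() and select the scalar FP save/restore
 * implementations. Runs at arch_initcall time.
 */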
static int signal_setup(void)
{
	/*
	 * The offset from sigcontext to extended context should be the same
	 * regardless of the type of signal, such that userland can always know
	 * where to look if it wishes to find the extended context structures.
	 */
	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
		      offsetof(struct sigframe, sf_sc)) !=
		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP && CONFIG_MIPS_FP_SUPPORT */

	return 0;
}

arch_initcall(signal_setup);