// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	      Changes to use preallocated sigqueue structures
 *	      to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear the flag themselves.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
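
/*
 * Editorial sketch (kept out of the build): if both a synchronous fault
 * signal and an asynchronous one are pending and unblocked, next_signal()
 * reports the synchronous one first, even though SIGSEGV (11) is
 * numerically larger than SIGHUP (1).
 */
#if 0
static void next_signal_example(struct sigpending *pending)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&pending->signal, SIGHUP);	/* asynchronous, sig 1 */
	sigaddset(&pending->signal, SIGSEGV);	/* synchronous, sig 11 */

	/* SYNCHRONOUS_MASK wins: SIGSEGV is returned, not SIGHUP */
	WARN_ON(next_signal(pending, &mask) != SIGSEGV);
}
#endif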

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if it became a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
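
/*
 * Editorial sketch (kept out of the build): the low JOBCTL_STOP_SIGMASK
 * bits carry the stop signal number, so a caller making a group stop
 * pending for, say, SIGTSTP combines the signo with the pending/consume
 * bits before calling task_set_jobctl_pending().  This loosely mirrors
 * what do_signal_stop() does; it is not a public API.
 */
#if 0
static bool queue_group_stop_sketch(struct task_struct *task)
{
	unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME |
			      (SIGTSTP & JOBCTL_STOP_SIGMASK);

	/* caller must hold task->sighand->siglock */
	return task_set_jobctl_pending(task, gstop);
}
#endif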

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}
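
/*
 * Editorial sketch (kept out of the build): every successful
 * __sigqueue_alloc() holds one RLIMIT_SIGPENDING ucount reference that
 * __sigqueue_free() drops, so a queue entry's lifetime pairs up like this:
 */
#if 0
static void sigqueue_lifetime_sketch(struct task_struct *t)
{
	struct sigqueue *q;

	/* only lock-free when t == current, see the comment above */
	q = __sigqueue_alloc(SIGUSR1, t, GFP_ATOMIC, 0, 0);
	if (!q)
		return;	/* over RLIMIT_SIGPENDING, or allocation failed */

	/* ... q would normally be linked into a struct sigpending ... */

	__sigqueue_free(q);	/* drops the ucounts reference again */
}
#endif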

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
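
/*
 * Editorial sketch (kept out of the build): architecture fault handlers
 * typically consult unhandled_signal() to decide whether a user fault is
 * worth logging, on the theory that a task with its own handler will deal
 * with the signal silently.  show_unhandled_signals is assumed to be the
 * usual sysctl-controlled knob from <linux/signal.h>.
 */
#if 0
static void fault_report_sketch(struct task_struct *tsk, unsigned long addr)
{
	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV))
		pr_info("%s[%d]: segfault at %lx\n",
			tsk->comm, task_pid_nr(tsk), addr);
}
#endif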

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once.  Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
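
/*
 * Editorial sketch (kept out of the build): dequeue_signal() requires the
 * siglock; a typical caller pattern on current looks like this.  Note the
 * mask semantics: the first pending signal *not* in *mask is dequeued.
 */
#if 0
static int dequeue_sketch(sigset_t *mask, kernel_siginfo_t *info)
{
	enum pid_type type;
	int sig;

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, mask, info, &type);
	spin_unlock_irq(&current->sighand->siglock);

	return sig;	/* 0 if nothing outside *mask was pending */
}
#endif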

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
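
/*
 * Editorial note: the signal_wake_up()/ptrace_signal_wake_up() helpers
 * used throughout this file are thin wrappers over signal_wake_up_state().
 * A simplified sketch follows; the real wrappers live in
 * <linux/sched/signal.h> and additionally handle ptrace-frozen tasks and
 * clearing of the JOBCTL_STOPPED/JOBCTL_TRACED state on fatal wakeups.
 */
#if 0
static inline void signal_wake_up_sketch(struct task_struct *t, bool fatal)
{
	/* a fatal signal must wake even TASK_KILLABLE sleeps */
	signal_wake_up_state(t, fatal ? TASK_WAKEKILL : 0);
}
#endif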

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL.  The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
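
/*
 * Editorial sketch (kept out of the build): the net effect of the checks
 * above is that an unprivileged sender may signal a target only when the
 * usual uid/euid pairs match (kill_ok_by_cred()), with SIGCONT further
 * allowed within the caller's own session.  A caller-side sketch:
 */
#if 0
static int signal_permission_sketch(struct task_struct *t)
{
	struct kernel_siginfo info;
	int ret;

	clear_siginfo(&info);
	info.si_signo = SIGTERM;
	info.si_code = SI_USER;	/* SI_FROMUSER() => permission check runs */
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	rcu_read_lock();
	ret = check_kill_permission(SIGTERM, &info, t);
	rcu_read_unlock();

	return ret;	/* -EPERM unless kill_ok_by_cred() allows it */
}
#endif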

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals.  Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue, we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
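
/*
 * Editorial sketch (kept out of the build): legacy (non-realtime) signals
 * coalesce via legacy_queue(), while realtime signals queue.  Sending the
 * same legacy signal twice before the target dequeues leaves one pending
 * instance; sending a realtime signal twice leaves two:
 */
#if 0
static void coalesce_sketch(struct task_struct *t)
{
	/* second send short-circuits in legacy_queue() */
	send_sig_info(SIGUSR1, SEND_SIG_PRIV, t);
	send_sig_info(SIGUSR1, SEND_SIG_PRIV, t);	/* coalesced away */

	/* realtime signals each get their own struct sigqueue */
	send_sig_info(SIGRTMIN, SEND_SIG_PRIV, t);
	send_sig_info(SIGRTMIN, SEND_SIG_PRIV, t);	/* queued again */
}
#endif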

static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo carries ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);

			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.  But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		/* Don't require de_thread to wait for the vhost_worker */
		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
			count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock; we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
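
/*
 * Editorial sketch (kept out of the build): callers normally reach the
 * RCU dance above through the lock_task_sighand()/unlock_task_sighand()
 * wrappers from <linux/sched/signal.h>, as do_send_sig_info() does:
 */
#if 0
static int with_sighand_sketch(struct task_struct *p)
{
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return -ESRCH;	/* task is exiting, sighand already gone */

	/* ... p->sighand->siglock is held here, irqs disabled ... */

	unlock_task_sighand(p, &flags);
	return 0;
}
#endif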

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32 bits of the pointer.  Those low 32 bits will be stored at a
 * higher address than a 32-bit pointer occupies, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
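
/*
 * Editorial sketch (kept out of the build): per the comment above, a
 * caller completing I/O for a 32-bit process on a 64-bit big-endian
 * kernel would pass the user pointer through sival_int.  The helper
 * below is illustrative only; it is not an existing kernel function.
 */
#if 0
static int usb_asyncio_send_sketch(struct pid *pid, const struct cred *cred,
				   void __user *uaddr, bool compat_task)
{
	sigval_t addr;

	if (compat_task)
		addr.sival_int = (int)(unsigned long)uaddr;
	else
		addr.sival_ptr = uaddr;

	return kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}
#endif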

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
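
/*
 * Editorial summary of the pid interpretation above, matching kill(2):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			init (vpid 1) and the caller's own thread group
 *	pid < -1	signal every process in process group -pid
 */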

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

1685 | void force_fatal_sig(int sig) |
1686 | { |
1687 | struct kernel_siginfo info; |
1688 | |
1689 | clear_siginfo(info: &info); |
1690 | info.si_signo = sig; |
1691 | info.si_errno = 0; |
1692 | info.si_code = SI_KERNEL; |
1693 | info.si_pid = 0; |
1694 | info.si_uid = 0; |
1695 | force_sig_info_to_task(info: &info, current, handler: HANDLER_SIG_DFL); |
1696 | } |
1697 | |
1698 | void force_exit_sig(int sig) |
1699 | { |
1700 | struct kernel_siginfo info; |
1701 | |
1702 | clear_siginfo(info: &info); |
1703 | info.si_signo = sig; |
1704 | info.si_errno = 0; |
1705 | info.si_code = SI_KERNEL; |
1706 | info.si_pid = 0; |
1707 | info.si_uid = 0; |
1708 | force_sig_info_to_task(info: &info, current, handler: HANDLER_EXIT); |
1709 | } |
1710 | |
1711 | /* |
1712 | * When things go south during signal handling, we |
1713 | * will force a SIGSEGV. And if the signal that caused |
1714 | * the problem was already a SIGSEGV, we'll want to |
1715 | * make sure we don't even try to deliver the signal.. |
1716 | */ |
1717 | void force_sigsegv(int sig) |
1718 | { |
1719 | if (sig == SIGSEGV) |
1720 | force_fatal_sig(SIGSEGV); |
1721 | else |
1722 | force_sig(SIGSEGV); |
1723 | } |
1724 | |
1725 | int force_sig_fault_to_task(int sig, int code, void __user *addr, |
1726 | struct task_struct *t) |
1727 | { |
1728 | struct kernel_siginfo info; |
1729 | |
1730 | clear_siginfo(info: &info); |
1731 | info.si_signo = sig; |
1732 | info.si_errno = 0; |
1733 | info.si_code = code; |
1734 | info.si_addr = addr; |
1735 | return force_sig_info_to_task(info: &info, t, handler: HANDLER_CURRENT); |
1736 | } |
1737 | |
1738 | int force_sig_fault(int sig, int code, void __user *addr) |
1739 | { |
1740 | return force_sig_fault_to_task(sig, code, addr, current); |
1741 | } |
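
/*
 * Illustrative sketch (not compiled): an architecture page-fault
 * handler would typically report a failed user access against current
 * like this; "fault_address" stands in for the address taken from the
 * trap frame.
 */
#if 0
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_address);
#endif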

int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}
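
/*
 * Illustrative sketch (not compiled): this mirrors how a seccomp
 * SECCOMP_RET_TRAP filter result would be reported. "this_syscall" and
 * "ret_data" are hypothetical locals for the filtered syscall number
 * and the filter's SECCOMP_RET_DATA bits.
 */
#if 0
	force_sig_seccomp(this_syscall, ret_data & SECCOMP_RET_DATA, false);
#endif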

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
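
/*
 * Illustrative sketch (not compiled): signalling via struct pid avoids
 * the pid-reuse races that raw pid_t values are subject to. The numeric
 * pid is a hypothetical input.
 */
#if 0
	struct pid *pid = find_get_pid(1234);

	if (pid) {
		kill_pid(pid, SIGTERM, 1);	/* priv: kernel-originated */
		put_pid(pid);
	}
#endif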

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure at timer_create time. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}
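
/*
 * Illustrative sketch (not compiled) of the pattern described above:
 * preallocate at creation time so a later expiry can never fail with
 * ENOMEM; only the create step can fail, and that failure is
 * reportable as EAGAIN. "target_pid" is hypothetical.
 */
#if 0
	struct sigqueue *q = sigqueue_alloc();

	if (!q)
		return -EAGAIN;			/* reported at timer_create time */
	/* ... on expiry, after filling in q->info (si_code = SI_TIMER): */
	send_sigqueue(q, target_pid, PIDTYPE_TGID);
	/* ... on timer deletion: */
	sigqueue_free(q);
#endif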

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();

	/*
	 * This function is used by POSIX timers to deliver a timer signal.
	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
	 * set), the signal must be delivered to the specific thread (queues
	 * into t->pending).
	 *
	 * Where type is not PIDTYPE_PID, signals must be delivered to the
	 * process. In this case, prefer to deliver to current if it is in
	 * the same thread group as the target process, which avoids
	 * unnecessarily waking up a potentially idle task.
	 */
	t = pid_task(pid, type);
	if (!t)
		goto ret;
	if (type != PIDTYPE_PID && same_thread_group(t, current))
		t = current;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	WARN_ON_ONCE(!tsk->ptrace &&
		     (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
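
/*
 * Userspace illustration (not kernel code) of the POSIX.1 semantics
 * handled above: a parent that sets SIGCHLD to SIG_IGN, or sets
 * SA_NOCLDWAIT, has its children auto-reaped, and a blocked wait4()
 * returns -ECHILD.
 */
#if 0
	struct sigaction sa = { .sa_handler = SIG_DFL,
				.sa_flags = SA_NOCLDWAIT };

	sigaction(SIGCHLD, &sa, NULL);		/* children now self-reap */
#endif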

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * Returns the signal the ptracer requested the code resume
 * with. If the code did not stop because the tracer is gone,
 * the stop signal remains unchanged unless clear_code.
 */
static int ptrace_stop(int exit_code, int why, unsigned long message,
		       kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed()) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
	}

	/*
	 * After this point ptrace_signal_wake_up or signal_wake_up
	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
	 * signal comes in. Handle previous ptrace_unlinks and fatal
	 * signals here to prevent ptrace_stop sleeping in schedule.
	 */
	if (!current->ptrace || __fatal_signal_pending(current))
		return exit_code;

	set_special_state(TASK_TRACED);
	current->jobctl |= JOBCTL_TRACED;

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->ptrace_message = message;
	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	/*
	 * Notify parents of the stop.
	 *
	 * While ptraced, there are two parents - the ptracer and
	 * the real_parent of the group_leader. The ptracer should
	 * know about every stop while the real parent is only
	 * interested in the completion of group stop. The states
	 * for the two don't interact with each other. Notify
	 * separately unless they're gonna be duplicates.
	 */
	if (current->ptrace)
		do_notify_parent_cldstop(current, true, why);
	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
		do_notify_parent_cldstop(current, false, why);

	/*
	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
	 * On a PREEMPTION kernel this can result in a preemption requirement
	 * which will be fulfilled after read_unlock() and the ptracer will be
	 * put on the CPU.
	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
	 * this task to wait in schedule(). If this task gets preempted then it
	 * remains enqueued on the runqueue. The ptracer will observe this and
	 * then sleep for a delay of one HZ tick. In the meantime this task
	 * gets scheduled, enters schedule() and will wait for the ptracer.
	 *
	 * This preemption point is not bad from a correctness point of
	 * view but extends the runtime by one HZ tick time due to the
	 * ptracer's sleep. The preempt-disable section ensures that there
	 * will be no preemption between unlock and schedule() and so
	 * improves performance, since the ptracer will observe that
	 * the tracee is scheduled out once it gets on the CPU.
	 *
	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
	 * Therefore the task can be preempted after do_notify_parent_cldstop()
	 * before unlocking tasklist_lock so there is no benefit in doing this.
	 *
	 * In fact disabling preemption is harmful on PREEMPT_RT because
	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
	 * with preemption disabled due to the 'sleeping' spinlock
	 * substitution of RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	read_unlock(&tasklist_lock);
	cgroup_enter_frozen();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable_no_resched();
	schedule();
	cgroup_leave_frozen(true);

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	exit_code = current->exit_code;
	current->last_siginfo = NULL;
	current->ptrace_message = 0;
	current->exit_code = 0;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
	return exit_code;
}

static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run. */
	return ptrace_stop(exit_code, why, message, &info);
}

int ptrace_notify(int exit_code, unsigned long message)
{
	int signr;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(task_work_pending(current)))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
		    unlikely(sig->group_exec_task))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED, 0);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into the frozen state, unless the task is about to quit,
 * in which case JOBCTL_TRAP_FREEZE is dropped instead.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	schedule();
}

static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	if (signr == 0)
		return signr;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr) ||
	    fatal_signal_pending(current)) {
		send_signal_locked(signr, info, current, type);
		signr = 0;
	}

	return signr;
}

static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	case SIL_KILL:
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_CHLD:
	case SIL_RT:
	case SIL_SYS:
		break;
	}
}

bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	clear_notify_signal();
	if (unlikely(task_work_pending(current)))
		task_work_run();

	if (!task_sigpending(current))
		return false;

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);

	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		enum pid_type type;

		/* Has this task already been marked for death? */
		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
		    signal->group_exec_task) {
			clear_siginfo(&ksig->info);
			ksig->info.si_signo = signr = SIGKILL;
			sigdelset(&current->pending.signal, SIGKILL);
			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
					     &sighand->action[SIGKILL - 1]);
			recalc_sigpending();
			goto fatal;
		}

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		type = PIDTYPE_PID;
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info, &type);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
		    !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
			signr = ptrace_signal(signr, &ksig->info, type);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * PF_USER_WORKER threads will catch and exit on fatal signals
		 * themselves. They have cleanup that must be performed, so
		 * we cannot call do_exit() on their behalf.
		 */
		if (current->flags & PF_USER_WORKER)
			goto out;

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
out:
	ksig->sig = signr;

	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
		hide_si_addr_tag_bits(ksig);

	return ksig->sig > 0;
}

/**
 * signal_delivered - called after signal delivery to update blocked signals
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn. So we can
	   simply clear the restore sigmask flag. */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	if (current->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(current);
	if (stepping)
		ptrace_notify(SIGTRAP, 0);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);
}
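
/*
 * Illustrative sketch (not compiled): an architecture's return-to-user
 * path glues get_signal() and signal_setup_done() together roughly as
 * below. setup_rt_frame() and "stepping" stand in for the arch-specific
 * frame-setup routine and its single-step bookkeeping.
 */
#if 0
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		int failed = setup_rt_frame(&ksig, regs);	/* arch-specific */

		signal_setup_done(failed, &ksig, stepping);
	}
#endif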

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!task_sigpending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		sched_mm_cid_exit_signals(tsk);
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	sched_mm_cid_exit_signals(tsk);
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!task_sigpending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification. This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

/*
 * System call entry points.
 */

/**
 * sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by another task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);
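
/*
 * Illustrative sketch (not compiled): a kernel thread that only wants
 * SIGTERM could block everything else; as noted above, this interface
 * would even let it block SIGKILL.
 */
#if 0
	sigset_t all_but_term;

	siginitsetinv(&all_but_term, sigmask(SIGTERM));
	sigprocmask(SIG_SETMASK, &all_but_term, NULL);
#endif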

/*
 * This API helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must always be
 * paired with restore_saved_sigmask_unless() before return from syscall.
 */
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
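
/*
 * Illustrative sketch (not compiled) of the required pairing: a
 * ppoll-style syscall installs the caller's mask up front and restores
 * the saved mask afterwards, unless the wait was interrupted, in which
 * case signal delivery restores it. "do_the_wait" is hypothetical.
 */
#if 0
	ret = set_user_sigmask(umask, sigsetsize);
	if (ret)
		return ret;

	ret = do_the_wait(args);

	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
#endif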

#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif

/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add, remove, or set, depending on @how; NULL to leave
 *	  the mask unchanged
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif
3243 | |
3244 | static void do_sigpending(sigset_t *set) |
3245 | { |
3246 | spin_lock_irq(lock: ¤t->sighand->siglock); |
3247 | sigorsets(r: set, a: ¤t->pending.signal, |
3248 | b: ¤t->signal->shared_pending.signal); |
3249 | spin_unlock_irq(lock: ¤t->sighand->siglock); |
3250 | |
3251 | /* Outside the lock because only this thread touches it. */ |
3252 | sigandsets(r: set, a: ¤t->blocked, b: set); |
3253 | } |
3254 | |
3255 | /** |
 * sys_rt_sigpending - examine pending signals that have been raised
 *			while blocked
 * @uset: stores the set of pending signals
 * @sigsetsize: size of sigset_t type, or smaller
3260 | */ |
3261 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
3262 | { |
3263 | sigset_t set; |
3264 | |
3265 | if (sigsetsize > sizeof(*uset)) |
3266 | return -EINVAL; |
3267 | |
	do_sigpending(&set);
3269 | |
	if (copy_to_user(uset, &set, sigsetsize))
3271 | return -EFAULT; |
3272 | |
3273 | return 0; |
3274 | } |
3275 | |
3276 | #ifdef CONFIG_COMPAT |
3277 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, |
3278 | compat_size_t, sigsetsize) |
3279 | { |
3280 | sigset_t set; |
3281 | |
3282 | if (sigsetsize > sizeof(*uset)) |
3283 | return -EINVAL; |
3284 | |
	do_sigpending(&set);
3286 | |
	return put_compat_sigset(uset, &set, sigsetsize);
3288 | } |
3289 | #endif |
3290 | |
3291 | static const struct { |
3292 | unsigned char limit, layout; |
3293 | } sig_sicodes[] = { |
3294 | [SIGILL] = { NSIGILL, SIL_FAULT }, |
3295 | [SIGFPE] = { NSIGFPE, SIL_FAULT }, |
3296 | [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, |
3297 | [SIGBUS] = { NSIGBUS, SIL_FAULT }, |
3298 | [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, |
3299 | #if defined(SIGEMT) |
3300 | [SIGEMT] = { NSIGEMT, SIL_FAULT }, |
3301 | #endif |
3302 | [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, |
3303 | [SIGPOLL] = { NSIGPOLL, SIL_POLL }, |
3304 | [SIGSYS] = { NSIGSYS, SIL_SYS }, |
3305 | }; |
3306 | |
3307 | static bool known_siginfo_layout(unsigned sig, int si_code) |
3308 | { |
3309 | if (si_code == SI_KERNEL) |
3310 | return true; |
	else if (si_code > SI_USER) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		} else if (si_code <= NSIGPOLL)
			return true;
3318 | } |
3319 | else if (si_code >= SI_DETHREAD) |
3320 | return true; |
3321 | else if (si_code == SI_ASYNCNL) |
3322 | return true; |
3323 | return false; |
3324 | } |
3325 | |
3326 | enum siginfo_layout siginfo_layout(unsigned sig, int si_code) |
3327 | { |
3328 | enum siginfo_layout layout = SIL_KILL; |
3329 | if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { |
3330 | if ((sig < ARRAY_SIZE(sig_sicodes)) && |
3331 | (si_code <= sig_sicodes[sig].limit)) { |
3332 | layout = sig_sicodes[sig].layout; |
3333 | /* Handle the exceptions */ |
3334 | if ((sig == SIGBUS) && |
3335 | (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) |
3336 | layout = SIL_FAULT_MCEERR; |
3337 | else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) |
3338 | layout = SIL_FAULT_BNDERR; |
3339 | #ifdef SEGV_PKUERR |
3340 | else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) |
3341 | layout = SIL_FAULT_PKUERR; |
3342 | #endif |
3343 | else if ((sig == SIGTRAP) && (si_code == TRAP_PERF)) |
3344 | layout = SIL_FAULT_PERF_EVENT; |
3345 | else if (IS_ENABLED(CONFIG_SPARC) && |
3346 | (sig == SIGILL) && (si_code == ILL_ILLTRP)) |
3347 | layout = SIL_FAULT_TRAPNO; |
3348 | else if (IS_ENABLED(CONFIG_ALPHA) && |
3349 | ((sig == SIGFPE) || |
3350 | ((sig == SIGTRAP) && (si_code == TRAP_UNK)))) |
3351 | layout = SIL_FAULT_TRAPNO; |
3352 | } |
3353 | else if (si_code <= NSIGPOLL) |
3354 | layout = SIL_POLL; |
3355 | } else { |
3356 | if (si_code == SI_TIMER) |
3357 | layout = SIL_TIMER; |
3358 | else if (si_code == SI_SIGIO) |
3359 | layout = SIL_POLL; |
3360 | else if (si_code < 0) |
3361 | layout = SIL_RT; |
3362 | } |
3363 | return layout; |
3364 | } |
3365 | |
3366 | static inline char __user *si_expansion(const siginfo_t __user *info) |
3367 | { |
3368 | return ((char __user *)info) + sizeof(struct kernel_siginfo); |
3369 | } |
3370 | |
3371 | int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) |
3372 | { |
	char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
3377 | return -EFAULT; |
3378 | return 0; |
3379 | } |
3380 | |
3381 | static int post_copy_siginfo_from_user(kernel_siginfo_t *info, |
3382 | const siginfo_t __user *from) |
3383 | { |
3384 | if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { |
		char __user *expansion = si_expansion(from);
3386 | char buf[SI_EXPANSION_SIZE]; |
3387 | int i; |
3388 | /* |
3389 | * An unknown si_code might need more than |
3390 | * sizeof(struct kernel_siginfo) bytes. Verify all of the |
3391 | * extra bytes are 0. This guarantees copy_siginfo_to_user |
3392 | * will return this data to userspace exactly. |
3393 | */ |
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3395 | return -EFAULT; |
3396 | for (i = 0; i < SI_EXPANSION_SIZE; i++) { |
3397 | if (buf[i] != 0) |
3398 | return -E2BIG; |
3399 | } |
3400 | } |
3401 | return 0; |
3402 | } |
3403 | |
3404 | static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, |
3405 | const siginfo_t __user *from) |
3406 | { |
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3408 | return -EFAULT; |
3409 | to->si_signo = signo; |
	return post_copy_siginfo_from_user(to, from);
3411 | } |
3412 | |
3413 | int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) |
3414 | { |
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3416 | return -EFAULT; |
	return post_copy_siginfo_from_user(to, from);
3418 | } |
3419 | |
3420 | #ifdef CONFIG_COMPAT |
3421 | /** |
3422 | * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo |
3423 | * @to: compat siginfo destination |
3424 | * @from: kernel siginfo source |
3425 | * |
 * Note: This function does not work properly for SIGCHLD on x32, but
 * fortunately it doesn't have to. The only valid callers of this function are
 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3429 | * The latter does not care because SIGCHLD will never cause a coredump. |
3430 | */ |
3431 | void copy_siginfo_to_external32(struct compat_siginfo *to, |
3432 | const struct kernel_siginfo *from) |
3433 | { |
3434 | memset(to, 0, sizeof(*to)); |
3435 | |
3436 | to->si_signo = from->si_signo; |
3437 | to->si_errno = from->si_errno; |
3438 | to->si_code = from->si_code; |
	switch (siginfo_layout(from->si_signo, from->si_code)) {
3440 | case SIL_KILL: |
3441 | to->si_pid = from->si_pid; |
3442 | to->si_uid = from->si_uid; |
3443 | break; |
3444 | case SIL_TIMER: |
3445 | to->si_tid = from->si_tid; |
3446 | to->si_overrun = from->si_overrun; |
3447 | to->si_int = from->si_int; |
3448 | break; |
3449 | case SIL_POLL: |
3450 | to->si_band = from->si_band; |
3451 | to->si_fd = from->si_fd; |
3452 | break; |
	case SIL_FAULT:
		to->si_addr = ptr_to_compat(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_lower = ptr_to_compat(from->si_lower);
		to->si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
3479 | case SIL_CHLD: |
3480 | to->si_pid = from->si_pid; |
3481 | to->si_uid = from->si_uid; |
3482 | to->si_status = from->si_status; |
3483 | to->si_utime = from->si_utime; |
3484 | to->si_stime = from->si_stime; |
3485 | break; |
3486 | case SIL_RT: |
3487 | to->si_pid = from->si_pid; |
3488 | to->si_uid = from->si_uid; |
3489 | to->si_int = from->si_int; |
3490 | break; |
3491 | case SIL_SYS: |
		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3493 | to->si_syscall = from->si_syscall; |
3494 | to->si_arch = from->si_arch; |
3495 | break; |
3496 | } |
3497 | } |
3498 | |
3499 | int __copy_siginfo_to_user32(struct compat_siginfo __user *to, |
3500 | const struct kernel_siginfo *from) |
3501 | { |
3502 | struct compat_siginfo new; |
3503 | |
	copy_siginfo_to_external32(&new, from);
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3506 | return -EFAULT; |
3507 | return 0; |
3508 | } |
3509 | |
3510 | static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, |
3511 | const struct compat_siginfo *from) |
3512 | { |
	clear_siginfo(to);
3514 | to->si_signo = from->si_signo; |
3515 | to->si_errno = from->si_errno; |
3516 | to->si_code = from->si_code; |
	switch (siginfo_layout(from->si_signo, from->si_code)) {
3518 | case SIL_KILL: |
3519 | to->si_pid = from->si_pid; |
3520 | to->si_uid = from->si_uid; |
3521 | break; |
3522 | case SIL_TIMER: |
3523 | to->si_tid = from->si_tid; |
3524 | to->si_overrun = from->si_overrun; |
3525 | to->si_int = from->si_int; |
3526 | break; |
3527 | case SIL_POLL: |
3528 | to->si_band = from->si_band; |
3529 | to->si_fd = from->si_fd; |
3530 | break; |
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
3557 | case SIL_CHLD: |
3558 | to->si_pid = from->si_pid; |
3559 | to->si_uid = from->si_uid; |
3560 | to->si_status = from->si_status; |
3561 | #ifdef CONFIG_X86_X32_ABI |
3562 | if (in_x32_syscall()) { |
3563 | to->si_utime = from->_sifields._sigchld_x32._utime; |
3564 | to->si_stime = from->_sifields._sigchld_x32._stime; |
3565 | } else |
3566 | #endif |
3567 | { |
3568 | to->si_utime = from->si_utime; |
3569 | to->si_stime = from->si_stime; |
3570 | } |
3571 | break; |
3572 | case SIL_RT: |
3573 | to->si_pid = from->si_pid; |
3574 | to->si_uid = from->si_uid; |
3575 | to->si_int = from->si_int; |
3576 | break; |
3577 | case SIL_SYS: |
		to->si_call_addr = compat_ptr(from->si_call_addr);
3579 | to->si_syscall = from->si_syscall; |
3580 | to->si_arch = from->si_arch; |
3581 | break; |
3582 | } |
3583 | return 0; |
3584 | } |
3585 | |
3586 | static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, |
3587 | const struct compat_siginfo __user *ufrom) |
3588 | { |
3589 | struct compat_siginfo from; |
3590 | |
	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3592 | return -EFAULT; |
3593 | |
3594 | from.si_signo = signo; |
	return post_copy_siginfo_from_user32(to, &from);
3596 | } |
3597 | |
3598 | int copy_siginfo_from_user32(struct kernel_siginfo *to, |
3599 | const struct compat_siginfo __user *ufrom) |
3600 | { |
3601 | struct compat_siginfo from; |
3602 | |
	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3604 | return -EFAULT; |
3605 | |
	return post_copy_siginfo_from_user32(to, &from);
3607 | } |
3608 | #endif /* CONFIG_COMPAT */ |
3609 | |
3610 | /** |
3611 | * do_sigtimedwait - wait for queued signals specified in @which |
3612 | * @which: queued signals to wait for |
3613 | * @info: if non-null, the signal's siginfo is returned here |
3614 | * @ts: upper bound on process time suspension |
3615 | */ |
3616 | static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, |
3617 | const struct timespec64 *ts) |
3618 | { |
3619 | ktime_t *to = NULL, timeout = KTIME_MAX; |
3620 | struct task_struct *tsk = current; |
3621 | sigset_t mask = *which; |
3622 | enum pid_type type; |
3623 | int sig, ret = 0; |
3624 | |
3625 | if (ts) { |
3626 | if (!timespec64_valid(ts)) |
3627 | return -EINVAL; |
		timeout = timespec64_to_ktime(*ts);
3629 | to = &timeout; |
3630 | } |
3631 | |
3632 | /* |
3633 | * Invert the set of allowed signals to get those we want to block. |
3634 | */ |
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
3639 | sig = dequeue_signal(tsk, &mask, info, &type); |
3640 | if (!sig && timeout) { |
3641 | /* |
3642 | * None ready, temporarily unblock those we're interested |
3643 | * while we are sleeping in so that we'll be awakened when |
3644 | * they arrive. Unblocking is always fine, we can avoid |
3645 | * set_current_blocked(). |
3646 | */ |
3647 | tsk->real_blocked = tsk->blocked; |
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
					       HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
3658 | sig = dequeue_signal(tsk, &mask, info, &type); |
3659 | } |
	spin_unlock_irq(&tsk->sighand->siglock);
3661 | |
3662 | if (sig) |
3663 | return sig; |
3664 | return ret ? -EINTR : -EAGAIN; |
3665 | } |
3666 | |
3667 | /** |
3668 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified |
3669 | * in @uthese |
3670 | * @uthese: queued signals to wait for |
3671 | * @uinfo: if non-null, the signal's siginfo is returned here |
3672 | * @uts: upper bound on process time suspension |
3673 | * @sigsetsize: size of sigset_t type |
3674 | */ |
3675 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
3676 | siginfo_t __user *, uinfo, |
3677 | const struct __kernel_timespec __user *, uts, |
3678 | size_t, sigsetsize) |
3679 | { |
3680 | sigset_t these; |
3681 | struct timespec64 ts; |
3682 | kernel_siginfo_t info; |
3683 | int ret; |
3684 | |
3685 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
3686 | if (sigsetsize != sizeof(sigset_t)) |
3687 | return -EINVAL; |
3688 | |
	if (copy_from_user(&these, uthese, sizeof(these)))
3690 | return -EFAULT; |
3691 | |
3692 | if (uts) { |
		if (get_timespec64(&ts, uts))
3694 | return -EFAULT; |
3695 | } |
3696 | |
	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3698 | |
3699 | if (ret > 0 && uinfo) { |
		if (copy_siginfo_to_user(uinfo, &info))
3701 | ret = -EFAULT; |
3702 | } |
3703 | |
3704 | return ret; |
3705 | } |
3706 | |
3707 | #ifdef CONFIG_COMPAT_32BIT_TIME |
3708 | SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, |
3709 | siginfo_t __user *, uinfo, |
3710 | const struct old_timespec32 __user *, uts, |
3711 | size_t, sigsetsize) |
3712 | { |
3713 | sigset_t these; |
3714 | struct timespec64 ts; |
3715 | kernel_siginfo_t info; |
3716 | int ret; |
3717 | |
3718 | if (sigsetsize != sizeof(sigset_t)) |
3719 | return -EINVAL; |
3720 | |
	if (copy_from_user(&these, uthese, sizeof(these)))
3722 | return -EFAULT; |
3723 | |
3724 | if (uts) { |
3725 | if (get_old_timespec32(&ts, uts)) |
3726 | return -EFAULT; |
3727 | } |
3728 | |
	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3730 | |
3731 | if (ret > 0 && uinfo) { |
		if (copy_siginfo_to_user(uinfo, &info))
3733 | ret = -EFAULT; |
3734 | } |
3735 | |
3736 | return ret; |
3737 | } |
3738 | #endif |
3739 | |
3740 | #ifdef CONFIG_COMPAT |
3741 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, |
3742 | struct compat_siginfo __user *, uinfo, |
3743 | struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) |
3744 | { |
3745 | sigset_t s; |
3746 | struct timespec64 t; |
3747 | kernel_siginfo_t info; |
3748 | long ret; |
3749 | |
3750 | if (sigsetsize != sizeof(sigset_t)) |
3751 | return -EINVAL; |
3752 | |
	if (get_compat_sigset(&s, uthese))
3754 | return -EFAULT; |
3755 | |
3756 | if (uts) { |
		if (get_timespec64(&t, uts))
3758 | return -EFAULT; |
3759 | } |
3760 | |
	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3762 | |
3763 | if (ret > 0 && uinfo) { |
		if (copy_siginfo_to_user32(uinfo, &info))
3765 | ret = -EFAULT; |
3766 | } |
3767 | |
3768 | return ret; |
3769 | } |
3770 | |
3771 | #ifdef CONFIG_COMPAT_32BIT_TIME |
3772 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, |
3773 | struct compat_siginfo __user *, uinfo, |
3774 | struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) |
3775 | { |
3776 | sigset_t s; |
3777 | struct timespec64 t; |
3778 | kernel_siginfo_t info; |
3779 | long ret; |
3780 | |
3781 | if (sigsetsize != sizeof(sigset_t)) |
3782 | return -EINVAL; |
3783 | |
	if (get_compat_sigset(&s, uthese))
3785 | return -EFAULT; |
3786 | |
3787 | if (uts) { |
3788 | if (get_old_timespec32(&t, uts)) |
3789 | return -EFAULT; |
3790 | } |
3791 | |
	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3793 | |
3794 | if (ret > 0 && uinfo) { |
		if (copy_siginfo_to_user32(uinfo, &info))
3796 | ret = -EFAULT; |
3797 | } |
3798 | |
3799 | return ret; |
3800 | } |
3801 | #endif |
3802 | #endif |
3803 | |
3804 | static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) |
3805 | { |
3806 | clear_siginfo(info); |
3807 | info->si_signo = sig; |
3808 | info->si_errno = 0; |
3809 | info->si_code = SI_USER; |
3810 | info->si_pid = task_tgid_vnr(current); |
3811 | info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
3812 | } |
3813 | |
3814 | /** |
3815 | * sys_kill - send a signal to a process |
3816 | * @pid: the PID of the process |
3817 | * @sig: signal to be sent |
3818 | */ |
3819 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
3820 | { |
3821 | struct kernel_siginfo info; |
3822 | |
	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
3826 | } |
3827 | |
3828 | /* |
3829 | * Verify that the signaler and signalee either are in the same pid namespace |
3830 | * or that the signaler's pid namespace is an ancestor of the signalee's pid |
3831 | * namespace. |
3832 | */ |
3833 | static bool access_pidfd_pidns(struct pid *pid) |
3834 | { |
3835 | struct pid_namespace *active = task_active_pid_ns(current); |
3836 | struct pid_namespace *p = ns_of_pid(pid); |
3837 | |
3838 | for (;;) { |
3839 | if (!p) |
3840 | return false; |
3841 | if (p == active) |
3842 | break; |
3843 | p = p->parent; |
3844 | } |
3845 | |
3846 | return true; |
3847 | } |
3848 | |
3849 | static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, |
3850 | siginfo_t __user *info) |
3851 | { |
3852 | #ifdef CONFIG_COMPAT |
3853 | /* |
3854 | * Avoid hooking up compat syscalls and instead handle necessary |
3855 | * conversions here. Note, this is a stop-gap measure and should not be |
3856 | * considered a generic solution. |
3857 | */ |
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
3863 | } |
3864 | |
3865 | static struct pid *pidfd_to_pid(const struct file *file) |
3866 | { |
3867 | struct pid *pid; |
3868 | |
3869 | pid = pidfd_pid(file); |
	if (!IS_ERR(pid))
3871 | return pid; |
3872 | |
3873 | return tgid_pidfd_to_pid(file); |
3874 | } |
3875 | |
3876 | /** |
3877 | * sys_pidfd_send_signal - Signal a process through a pidfd |
3878 | * @pidfd: file descriptor of the process |
3879 | * @sig: signal to send |
3880 | * @info: signal info |
3881 | * @flags: future flags |
3882 | * |
 * The syscall currently only signals via PIDTYPE_PID, which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. In other words,
 * grouping is a property of the flags argument, not a property of the file
 * descriptor.
3891 | * |
3892 | * Return: 0 on success, negative errno on failure |
3893 | */ |
3894 | SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, |
3895 | siginfo_t __user *, info, unsigned int, flags) |
3896 | { |
3897 | int ret; |
3898 | struct fd f; |
3899 | struct pid *pid; |
3900 | kernel_siginfo_t kinfo; |
3901 | |
	/* Enforce that flags is 0 until we add an extension. */
3903 | if (flags) |
3904 | return -EINVAL; |
3905 | |
	f = fdget(pidfd);
3907 | if (!f.file) |
3908 | return -EBADF; |
3909 | |
3910 | /* Is this a pidfd? */ |
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
3914 | goto err; |
3915 | } |
3916 | |
3917 | ret = -EINVAL; |
3918 | if (!access_pidfd_pidns(pid)) |
3919 | goto err; |
3920 | |
3921 | if (info) { |
		ret = copy_siginfo_from_user_any(&kinfo, info);
3923 | if (unlikely(ret)) |
3924 | goto err; |
3925 | |
3926 | ret = -EINVAL; |
3927 | if (unlikely(sig != kinfo.si_signo)) |
3928 | goto err; |
3929 | |
3930 | /* Only allow sending arbitrary signals to yourself. */ |
3931 | ret = -EPERM; |
3932 | if ((task_pid(current) != pid) && |
3933 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) |
3934 | goto err; |
3935 | } else { |
		prepare_kill_siginfo(sig, &kinfo);
3937 | } |
3938 | |
	ret = kill_pid_info(sig, &kinfo, pid);
3940 | |
3941 | err: |
	fdput(f);
3943 | return ret; |
3944 | } |
3945 | |
3946 | static int |
3947 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) |
3948 | { |
3949 | struct task_struct *p; |
3950 | int error = -ESRCH; |
3951 | |
3952 | rcu_read_lock(); |
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
3956 | /* |
3957 | * The null signal is a permissions and process existence |
3958 | * probe. No signal is actually delivered. |
3959 | */ |
3960 | if (!error && sig) { |
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3962 | /* |
3963 | * If lock_task_sighand() failed we pretend the task |
3964 | * dies after receiving the signal. The window is tiny, |
3965 | * and the signal is private anyway. |
3966 | */ |
3967 | if (unlikely(error == -ESRCH)) |
3968 | error = 0; |
3969 | } |
3970 | } |
3971 | rcu_read_unlock(); |
3972 | |
3973 | return error; |
3974 | } |
3975 | |
3976 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
3977 | { |
3978 | struct kernel_siginfo info; |
3979 | |
	clear_siginfo(&info);
3981 | info.si_signo = sig; |
3982 | info.si_errno = 0; |
3983 | info.si_code = SI_TKILL; |
3984 | info.si_pid = task_tgid_vnr(current); |
3985 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
3986 | |
	return do_send_specific(tgid, pid, sig, &info);
3988 | } |
3989 | |
3990 | /** |
3991 | * sys_tgkill - send signal to one specific thread |
3992 | * @tgid: the thread group ID of the thread |
3993 | * @pid: the PID of the thread |
3994 | * @sig: signal to be sent |
3995 | * |
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This guards
 * against threads exiting and their PIDs getting reused.
3999 | */ |
4000 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
4001 | { |
4002 | /* This is only valid for single tasks */ |
4003 | if (pid <= 0 || tgid <= 0) |
4004 | return -EINVAL; |
4005 | |
4006 | return do_tkill(tgid, pid, sig); |
4007 | } |
4008 | |
4009 | /** |
4010 | * sys_tkill - send signal to one specific task |
4011 | * @pid: the PID of the task |
4012 | * @sig: signal to be sent |
4013 | * |
4014 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
4015 | */ |
4016 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
4017 | { |
4018 | /* This is only valid for single tasks */ |
4019 | if (pid <= 0) |
4020 | return -EINVAL; |
4021 | |
	return do_tkill(0, pid, sig);
4023 | } |
4024 | |
4025 | static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) |
4026 | { |
4027 | /* Not even root can pretend to send signals from the kernel. |
4028 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
4029 | */ |
4030 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
4031 | (task_pid_vnr(current) != pid)) |
4032 | return -EPERM; |
4033 | |
4034 | /* POSIX.1b doesn't mention process groups. */ |
4035 | return kill_proc_info(sig, info, pid); |
4036 | } |
4037 | |
4038 | /** |
 * sys_rt_sigqueueinfo - queue a signal and accompanying data to a process
 * @pid: the PID of the process
4041 | * @sig: signal to be sent |
4042 | * @uinfo: signal info to be sent |
4043 | */ |
4044 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
4045 | siginfo_t __user *, uinfo) |
4046 | { |
4047 | kernel_siginfo_t info; |
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4049 | if (unlikely(ret)) |
4050 | return ret; |
	return do_rt_sigqueueinfo(pid, sig, &info);
4052 | } |
4053 | |
4054 | #ifdef CONFIG_COMPAT |
4055 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, |
4056 | compat_pid_t, pid, |
4057 | int, sig, |
4058 | struct compat_siginfo __user *, uinfo) |
4059 | { |
4060 | kernel_siginfo_t info; |
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4062 | if (unlikely(ret)) |
4063 | return ret; |
	return do_rt_sigqueueinfo(pid, sig, &info);
4065 | } |
4066 | #endif |
4067 | |
4068 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) |
4069 | { |
4070 | /* This is only valid for single tasks */ |
4071 | if (pid <= 0 || tgid <= 0) |
4072 | return -EINVAL; |
4073 | |
4074 | /* Not even root can pretend to send signals from the kernel. |
4075 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
4076 | */ |
4077 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
4078 | (task_pid_vnr(current) != pid)) |
4079 | return -EPERM; |
4080 | |
4081 | return do_send_specific(tgid, pid, sig, info); |
4082 | } |
4083 | |
4084 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
4085 | siginfo_t __user *, uinfo) |
4086 | { |
4087 | kernel_siginfo_t info; |
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4089 | if (unlikely(ret)) |
4090 | return ret; |
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4092 | } |
4093 | |
4094 | #ifdef CONFIG_COMPAT |
4095 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, |
4096 | compat_pid_t, tgid, |
4097 | compat_pid_t, pid, |
4098 | int, sig, |
4099 | struct compat_siginfo __user *, uinfo) |
4100 | { |
4101 | kernel_siginfo_t info; |
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4103 | if (unlikely(ret)) |
4104 | return ret; |
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4106 | } |
4107 | #endif |
4108 | |
4109 | /* |
4110 | * For kthreads only, must not be used if cloned with CLONE_SIGHAND |
4111 | */ |
4112 | void kernel_sigaction(int sig, __sighandler_t action) |
4113 | { |
	spin_lock_irq(&current->sighand->siglock);
4115 | current->sighand->action[sig - 1].sa.sa_handler = action; |
4116 | if (action == SIG_IGN) { |
4117 | sigset_t mask; |
4118 | |
		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
4124 | recalc_sigpending(); |
4125 | } |
	spin_unlock_irq(&current->sighand->siglock);
4127 | } |
4128 | EXPORT_SYMBOL(kernel_sigaction); |
4129 | |
4130 | void __weak sigaction_compat_abi(struct k_sigaction *act, |
4131 | struct k_sigaction *oact) |
4132 | { |
4133 | } |
4134 | |
4135 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
4136 | { |
4137 | struct task_struct *p = current, *t; |
4138 | struct k_sigaction *k; |
4139 | sigset_t mask; |
4140 | |
4141 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
4142 | return -EINVAL; |
4143 | |
4144 | k = &p->sighand->action[sig-1]; |
4145 | |
	spin_lock_irq(&p->sighand->siglock);
	if (k->sa.sa_flags & SA_IMMUTABLE) {
		spin_unlock_irq(&p->sighand->siglock);
4149 | return -EINVAL; |
4150 | } |
4151 | if (oact) |
4152 | *oact = *k; |
4153 | |
4154 | /* |
4155 | * Make sure that we never accidentally claim to support SA_UNSUPPORTED, |
4156 | * e.g. by having an architecture use the bit in their uapi. |
4157 | */ |
4158 | BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED); |
4159 | |
4160 | /* |
4161 | * Clear unknown flag bits in order to allow userspace to detect missing |
4162 | * support for flag bits and to allow the kernel to use non-uapi bits |
4163 | * internally. |
4164 | */ |
4165 | if (act) |
4166 | act->sa.sa_flags &= UAPI_SA_FLAGS; |
4167 | if (oact) |
4168 | oact->sa.sa_flags &= UAPI_SA_FLAGS; |
4169 | |
4170 | sigaction_compat_abi(act, oact); |
4171 | |
4172 | if (act) { |
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4175 | *k = *act; |
4176 | /* |
4177 | * POSIX 3.3.1.3: |
4178 | * "Setting a signal action to SIG_IGN for a signal that is |
4179 | * pending shall cause the pending signal to be discarded, |
4180 | * whether or not it is blocked." |
4181 | * |
4182 | * "Setting a signal action to SIG_DFL for a signal that is |
4183 | * pending and whose default action is to ignore the signal |
4184 | * (for example, SIGCHLD), shall cause the pending signal to |
4185 | * be discarded, whether or not it is blocked" |
4186 | */ |
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
4193 | } |
4194 | } |
4195 | |
	spin_unlock_irq(&p->sighand->siglock);
4197 | return 0; |
4198 | } |
4199 | |
4200 | #ifdef CONFIG_DYNAMIC_SIGFRAME |
4201 | static inline void sigaltstack_lock(void) |
4202 | __acquires(¤t->sighand->siglock) |
4203 | { |
	spin_lock_irq(&current->sighand->siglock);
4205 | } |
4206 | |
4207 | static inline void sigaltstack_unlock(void) |
4208 | __releases(¤t->sighand->siglock) |
4209 | { |
	spin_unlock_irq(&current->sighand->siglock);
4211 | } |
4212 | #else |
4213 | static inline void sigaltstack_lock(void) { } |
4214 | static inline void sigaltstack_unlock(void) { } |
4215 | #endif |
4216 | |
4217 | static int |
4218 | do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, |
4219 | size_t min_ss_size) |
4220 | { |
4221 | struct task_struct *t = current; |
4222 | int ret = 0; |
4223 | |
4224 | if (oss) { |
4225 | memset(oss, 0, sizeof(stack_t)); |
4226 | oss->ss_sp = (void __user *) t->sas_ss_sp; |
4227 | oss->ss_size = t->sas_ss_size; |
4228 | oss->ss_flags = sas_ss_flags(sp) | |
4229 | (current->sas_ss_flags & SS_FLAG_BITS); |
4230 | } |
4231 | |
4232 | if (ss) { |
4233 | void __user *ss_sp = ss->ss_sp; |
4234 | size_t ss_size = ss->ss_size; |
4235 | unsigned ss_flags = ss->ss_flags; |
4236 | int ss_mode; |
4237 | |
4238 | if (unlikely(on_sig_stack(sp))) |
4239 | return -EPERM; |
4240 | |
4241 | ss_mode = ss_flags & ~SS_FLAG_BITS; |
4242 | if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && |
4243 | ss_mode != 0)) |
4244 | return -EINVAL; |
4245 | |
4246 | /* |
4247 | * Return before taking any locks if no actual |
4248 | * sigaltstack changes were requested. |
4249 | */ |
4250 | if (t->sas_ss_sp == (unsigned long)ss_sp && |
4251 | t->sas_ss_size == ss_size && |
4252 | t->sas_ss_flags == ss_flags) |
4253 | return 0; |
4254 | |
4255 | sigaltstack_lock(); |
4256 | if (ss_mode == SS_DISABLE) { |
4257 | ss_size = 0; |
4258 | ss_sp = NULL; |
4259 | } else { |
4260 | if (unlikely(ss_size < min_ss_size)) |
4261 | ret = -ENOMEM; |
4262 | if (!sigaltstack_size_valid(ss_size)) |
4263 | ret = -ENOMEM; |
4264 | } |
4265 | if (!ret) { |
4266 | t->sas_ss_sp = (unsigned long) ss_sp; |
4267 | t->sas_ss_size = ss_size; |
4268 | t->sas_ss_flags = ss_flags; |
4269 | } |
4270 | sigaltstack_unlock(); |
4271 | } |
4272 | return ret; |
4273 | } |
4274 | |
4275 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) |
4276 | { |
4277 | stack_t new, old; |
4278 | int err; |
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4285 | err = -EFAULT; |
4286 | return err; |
4287 | } |
4288 | |
4289 | int restore_altstack(const stack_t __user *uss) |
4290 | { |
4291 | stack_t new; |
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
4296 | /* squash all but EFAULT for now */ |
4297 | return 0; |
4298 | } |
4299 | |
4300 | int __save_altstack(stack_t __user *uss, unsigned long sp) |
4301 | { |
4302 | struct task_struct *t = current; |
4303 | int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | |
4304 | __put_user(t->sas_ss_flags, &uss->ss_flags) | |
4305 | __put_user(t->sas_ss_size, &uss->ss_size); |
4306 | return err; |
4307 | } |
4308 | |
4309 | #ifdef CONFIG_COMPAT |
4310 | static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, |
4311 | compat_stack_t __user *uoss_ptr) |
4312 | { |
4313 | stack_t uss, uoss; |
4314 | int ret; |
4315 | |
4316 | if (uss_ptr) { |
4317 | compat_stack_t uss32; |
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4319 | return -EFAULT; |
		uss.ss_sp = compat_ptr(uss32.ss_sp);
4321 | uss.ss_flags = uss32.ss_flags; |
4322 | uss.ss_size = uss32.ss_size; |
4323 | } |
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4325 | compat_user_stack_pointer(), |
4326 | COMPAT_MINSIGSTKSZ); |
4327 | if (ret >= 0 && uoss_ptr) { |
4328 | compat_stack_t old; |
4329 | memset(&old, 0, sizeof(old)); |
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4331 | old.ss_flags = uoss.ss_flags; |
4332 | old.ss_size = uoss.ss_size; |
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4334 | ret = -EFAULT; |
4335 | } |
4336 | return ret; |
4337 | } |
4338 | |
4339 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
4340 | const compat_stack_t __user *, uss_ptr, |
4341 | compat_stack_t __user *, uoss_ptr) |
4342 | { |
4343 | return do_compat_sigaltstack(uss_ptr, uoss_ptr); |
4344 | } |
4345 | |
4346 | int compat_restore_altstack(const compat_stack_t __user *uss) |
4347 | { |
	int err = do_compat_sigaltstack(uss, NULL);
4349 | /* squash all but -EFAULT for now */ |
4350 | return err == -EFAULT ? err : 0; |
4351 | } |
4352 | |
4353 | int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) |
4354 | { |
4355 | int err; |
4356 | struct task_struct *t = current; |
4357 | err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), |
4358 | &uss->ss_sp) | |
4359 | __put_user(t->sas_ss_flags, &uss->ss_flags) | |
4360 | __put_user(t->sas_ss_size, &uss->ss_size); |
4361 | return err; |
4362 | } |
4363 | #endif |
4364 | |
4365 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
4366 | |
4367 | /** |
4368 | * sys_sigpending - examine pending signals |
 * @uset: where the mask of pending signals is returned
4370 | */ |
4371 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset) |
4372 | { |
4373 | sigset_t set; |
4374 | |
4375 | if (sizeof(old_sigset_t) > sizeof(*uset)) |
4376 | return -EINVAL; |
4377 | |
	do_sigpending(&set);
4379 | |
	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4381 | return -EFAULT; |
4382 | |
4383 | return 0; |
4384 | } |
4385 | |
4386 | #ifdef CONFIG_COMPAT |
4387 | COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) |
4388 | { |
4389 | sigset_t set; |
4390 | |
	do_sigpending(&set);
4392 | |
4393 | return put_user(set.sig[0], set32); |
4394 | } |
4395 | #endif |
4396 | |
4397 | #endif |
4398 | |
4399 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
4400 | /** |
4401 | * sys_sigprocmask - examine and change blocked signals |
4402 | * @how: whether to add, remove, or set signals |
4403 | * @nset: signals to add or remove (if non-null) |
4404 | * @oset: previous value of signal mask if non-null |
4405 | * |
4406 | * Some platforms have their own version with special arguments; |
4407 | * others support only sys_rt_sigprocmask. |
4408 | */ |
4409 | |
4410 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
4411 | old_sigset_t __user *, oset) |
4412 | { |
4413 | old_sigset_t old_set, new_set; |
4414 | sigset_t new_blocked; |
4415 | |
4416 | old_set = current->blocked.sig[0]; |
4417 | |
4418 | if (nset) { |
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4420 | return -EFAULT; |
4421 | |
4422 | new_blocked = current->blocked; |
4423 | |
4424 | switch (how) { |
4425 | case SIG_BLOCK: |
			sigaddsetmask(&new_blocked, new_set);
4427 | break; |
4428 | case SIG_UNBLOCK: |
			sigdelsetmask(&new_blocked, new_set);
4430 | break; |
4431 | case SIG_SETMASK: |
4432 | new_blocked.sig[0] = new_set; |
4433 | break; |
4434 | default: |
4435 | return -EINVAL; |
4436 | } |
4437 | |
4438 | set_current_blocked(&new_blocked); |
4439 | } |
4440 | |
4441 | if (oset) { |
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4443 | return -EFAULT; |
4444 | } |
4445 | |
4446 | return 0; |
4447 | } |
4448 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
4449 | |
4450 | #ifndef CONFIG_ODD_RT_SIGACTION |
4451 | /** |
4452 | * sys_rt_sigaction - alter an action taken by a process |
4453 | * @sig: signal to be sent |
4454 | * @act: new sigaction |
4455 | * @oact: used to save the previous sigaction |
4456 | * @sigsetsize: size of sigset_t type |
4457 | */ |
4458 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
4459 | const struct sigaction __user *, act, |
4460 | struct sigaction __user *, oact, |
4461 | size_t, sigsetsize) |
4462 | { |
4463 | struct k_sigaction new_sa, old_sa; |
4464 | int ret; |
4465 | |
4466 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
4467 | if (sigsetsize != sizeof(sigset_t)) |
4468 | return -EINVAL; |
4469 | |
	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4471 | return -EFAULT; |
4472 | |
	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4474 | if (ret) |
4475 | return ret; |
4476 | |
	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4478 | return -EFAULT; |
4479 | |
4480 | return 0; |
4481 | } |
4482 | #ifdef CONFIG_COMPAT |
4483 | COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, |
4484 | const struct compat_sigaction __user *, act, |
4485 | struct compat_sigaction __user *, oact, |
4486 | compat_size_t, sigsetsize) |
4487 | { |
4488 | struct k_sigaction new_ka, old_ka; |
4489 | #ifdef __ARCH_HAS_SA_RESTORER |
4490 | compat_uptr_t restorer; |
4491 | #endif |
4492 | int ret; |
4493 | |
4494 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
4495 | if (sigsetsize != sizeof(compat_sigset_t)) |
4496 | return -EINVAL; |
4497 | |
4498 | if (act) { |
4499 | compat_uptr_t handler; |
4500 | ret = get_user(handler, &act->sa_handler); |
		new_ka.sa.sa_handler = compat_ptr(handler);
4502 | #ifdef __ARCH_HAS_SA_RESTORER |
4503 | ret |= get_user(restorer, &act->sa_restorer); |
		new_ka.sa.sa_restorer = compat_ptr(restorer);
4505 | #endif |
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4507 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); |
4508 | if (ret) |
4509 | return -EFAULT; |
4510 | } |
4511 | |
	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4513 | if (!ret && oact) { |
4514 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
4515 | &oact->sa_handler); |
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
4518 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
4519 | #ifdef __ARCH_HAS_SA_RESTORER |
4520 | ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), |
4521 | &oact->sa_restorer); |
4522 | #endif |
4523 | } |
4524 | return ret; |
4525 | } |
4526 | #endif |
4527 | #endif /* !CONFIG_ODD_RT_SIGACTION */ |
4528 | |
4529 | #ifdef CONFIG_OLD_SIGACTION |
4530 | SYSCALL_DEFINE3(sigaction, int, sig, |
4531 | const struct old_sigaction __user *, act, |
4532 | struct old_sigaction __user *, oact) |
4533 | { |
4534 | struct k_sigaction new_ka, old_ka; |
4535 | int ret; |
4536 | |
4537 | if (act) { |
4538 | old_sigset_t mask; |
4539 | if (!access_ok(act, sizeof(*act)) || |
4540 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
4541 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || |
4542 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || |
4543 | __get_user(mask, &act->sa_mask)) |
4544 | return -EFAULT; |
4545 | #ifdef __ARCH_HAS_KA_RESTORER |
4546 | new_ka.ka_restorer = NULL; |
4547 | #endif |
4548 | siginitset(&new_ka.sa.sa_mask, mask); |
4549 | } |
4550 | |
4551 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
4552 | |
4553 | if (!ret && oact) { |
4554 | if (!access_ok(oact, sizeof(*oact)) || |
4555 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
4556 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || |
4557 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || |
4558 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) |
4559 | return -EFAULT; |
4560 | } |
4561 | |
4562 | return ret; |
4563 | } |
4564 | #endif |
4565 | #ifdef CONFIG_COMPAT_OLD_SIGACTION |
4566 | COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, |
4567 | const struct compat_old_sigaction __user *, act, |
4568 | struct compat_old_sigaction __user *, oact) |
4569 | { |
4570 | struct k_sigaction new_ka, old_ka; |
4571 | int ret; |
4572 | compat_old_sigset_t mask; |
4573 | compat_uptr_t handler, restorer; |
4574 | |
4575 | if (act) { |
4576 | if (!access_ok(act, sizeof(*act)) || |
4577 | __get_user(handler, &act->sa_handler) || |
4578 | __get_user(restorer, &act->sa_restorer) || |
4579 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || |
4580 | __get_user(mask, &act->sa_mask)) |
4581 | return -EFAULT; |
4582 | |
4583 | #ifdef __ARCH_HAS_KA_RESTORER |
4584 | new_ka.ka_restorer = NULL; |
4585 | #endif |
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
4589 | } |
4590 | |
	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4592 | |
4593 | if (!ret && oact) { |
4594 | if (!access_ok(oact, sizeof(*oact)) || |
4595 | __put_user(ptr_to_compat(old_ka.sa.sa_handler), |
4596 | &oact->sa_handler) || |
4597 | __put_user(ptr_to_compat(old_ka.sa.sa_restorer), |
4598 | &oact->sa_restorer) || |
4599 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || |
4600 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) |
4601 | return -EFAULT; |
4602 | } |
4603 | return ret; |
4604 | } |
4605 | #endif |
4606 | |
4607 | #ifdef CONFIG_SGETMASK_SYSCALL |
4608 | |
4609 | /* |
4610 | * For backwards compatibility. Functionality superseded by sigprocmask. |
4611 | */ |
4612 | SYSCALL_DEFINE0(sgetmask) |
4613 | { |
4614 | /* SMP safe */ |
4615 | return current->blocked.sig[0]; |
4616 | } |
4617 | |
4618 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
4619 | { |
4620 | int old = current->blocked.sig[0]; |
4621 | sigset_t newset; |
4622 | |
	siginitset(&newset, newmask);
4624 | set_current_blocked(&newset); |
4625 | |
4626 | return old; |
4627 | } |
4628 | #endif /* CONFIG_SGETMASK_SYSCALL */ |
4629 | |
4630 | #ifdef __ARCH_WANT_SYS_SIGNAL |
4631 | /* |
4632 | * For backwards compatibility. Functionality superseded by sigaction. |
4633 | */ |
4634 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
4635 | { |
4636 | struct k_sigaction new_sa, old_sa; |
4637 | int ret; |
4638 | |
4639 | new_sa.sa.sa_handler = handler; |
4640 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; |
	sigemptyset(&new_sa.sa.sa_mask);
4642 | |
	ret = do_sigaction(sig, &new_sa, &old_sa);
4644 | |
4645 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; |
4646 | } |
4647 | #endif /* __ARCH_WANT_SYS_SIGNAL */ |
4648 | |
4649 | #ifdef __ARCH_WANT_SYS_PAUSE |
4650 | |
4651 | SYSCALL_DEFINE0(pause) |
4652 | { |
4653 | while (!signal_pending(current)) { |
4654 | __set_current_state(TASK_INTERRUPTIBLE); |
4655 | schedule(); |
4656 | } |
4657 | return -ERESTARTNOHAND; |
4658 | } |
4659 | |
4660 | #endif |
4661 | |
4662 | static int sigsuspend(sigset_t *set) |
4663 | { |
4664 | current->saved_sigmask = current->blocked; |
4665 | set_current_blocked(set); |
4666 | |
4667 | while (!signal_pending(current)) { |
4668 | __set_current_state(TASK_INTERRUPTIBLE); |
4669 | schedule(); |
4670 | } |
4671 | set_restore_sigmask(); |
4672 | return -ERESTARTNOHAND; |
4673 | } |
4674 | |
4675 | /** |
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
4678 | * @unewset: new signal mask value |
4679 | * @sigsetsize: size of sigset_t type |
4680 | */ |
4681 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
4682 | { |
4683 | sigset_t newset; |
4684 | |
4685 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
4686 | if (sigsetsize != sizeof(sigset_t)) |
4687 | return -EINVAL; |
4688 | |
	if (copy_from_user(&newset, unewset, sizeof(newset)))
4690 | return -EFAULT; |
	return sigsuspend(&newset);
4692 | } |
4693 | |
4694 | #ifdef CONFIG_COMPAT |
4695 | COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) |
4696 | { |
4697 | sigset_t newset; |
4698 | |
4699 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
4700 | if (sigsetsize != sizeof(sigset_t)) |
4701 | return -EINVAL; |
4702 | |
	if (get_compat_sigset(&newset, unewset))
4704 | return -EFAULT; |
	return sigsuspend(&newset);
4706 | } |
4707 | #endif |
4708 | |
4709 | #ifdef CONFIG_OLD_SIGSUSPEND |
4710 | SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) |
4711 | { |
4712 | sigset_t blocked; |
4713 | siginitset(&blocked, mask); |
4714 | return sigsuspend(&blocked); |
4715 | } |
4716 | #endif |
4717 | #ifdef CONFIG_OLD_SIGSUSPEND3 |
4718 | SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) |
4719 | { |
4720 | sigset_t blocked; |
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
4723 | } |
4724 | #endif |
4725 | |
4726 | __weak const char *arch_vma_name(struct vm_area_struct *vma) |
4727 | { |
4728 | return NULL; |
4729 | } |
4730 | |
4731 | static inline void siginfo_buildtime_checks(void) |
4732 | { |
4733 | BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); |
4734 | |
4735 | /* Verify the offsets in the two siginfos match */ |
4736 | #define CHECK_OFFSET(field) \ |
4737 | BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field)) |
4738 | |
4739 | /* kill */ |
4740 | CHECK_OFFSET(si_pid); |
4741 | CHECK_OFFSET(si_uid); |
4742 | |
4743 | /* timer */ |
4744 | CHECK_OFFSET(si_tid); |
4745 | CHECK_OFFSET(si_overrun); |
4746 | CHECK_OFFSET(si_value); |
4747 | |
4748 | /* rt */ |
4749 | CHECK_OFFSET(si_pid); |
4750 | CHECK_OFFSET(si_uid); |
4751 | CHECK_OFFSET(si_value); |
4752 | |
4753 | /* sigchld */ |
4754 | CHECK_OFFSET(si_pid); |
4755 | CHECK_OFFSET(si_uid); |
4756 | CHECK_OFFSET(si_status); |
4757 | CHECK_OFFSET(si_utime); |
4758 | CHECK_OFFSET(si_stime); |
4759 | |
4760 | /* sigfault */ |
4761 | CHECK_OFFSET(si_addr); |
4762 | CHECK_OFFSET(si_trapno); |
4763 | CHECK_OFFSET(si_addr_lsb); |
4764 | CHECK_OFFSET(si_lower); |
4765 | CHECK_OFFSET(si_upper); |
4766 | CHECK_OFFSET(si_pkey); |
4767 | CHECK_OFFSET(si_perf_data); |
4768 | CHECK_OFFSET(si_perf_type); |
4769 | CHECK_OFFSET(si_perf_flags); |
4770 | |
4771 | /* sigpoll */ |
4772 | CHECK_OFFSET(si_band); |
4773 | CHECK_OFFSET(si_fd); |
4774 | |
4775 | /* sigsys */ |
4776 | CHECK_OFFSET(si_call_addr); |
4777 | CHECK_OFFSET(si_syscall); |
4778 | CHECK_OFFSET(si_arch); |
4779 | #undef CHECK_OFFSET |
4780 | |
4781 | /* usb asyncio */ |
4782 | BUILD_BUG_ON(offsetof(struct siginfo, si_pid) != |
4783 | offsetof(struct siginfo, si_addr)); |
4784 | if (sizeof(int) == sizeof(void __user *)) { |
4785 | BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) != |
4786 | sizeof(void __user *)); |
4787 | } else { |
4788 | BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) + |
4789 | sizeof_field(struct siginfo, si_uid)) != |
4790 | sizeof(void __user *)); |
4791 | BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) != |
4792 | offsetof(struct siginfo, si_uid)); |
4793 | } |
4794 | #ifdef CONFIG_COMPAT |
4795 | BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) != |
4796 | offsetof(struct compat_siginfo, si_addr)); |
4797 | BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != |
4798 | sizeof(compat_uptr_t)); |
4799 | BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != |
4800 | sizeof_field(struct siginfo, si_pid)); |
4801 | #endif |
4802 | } |
4803 | |
4804 | #if defined(CONFIG_SYSCTL) |
4805 | static struct ctl_table signal_debug_table[] = { |
4806 | #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE |
4807 | { |
		.procname	= "exception-trace",
4809 | .data = &show_unhandled_signals, |
4810 | .maxlen = sizeof(int), |
4811 | .mode = 0644, |
4812 | .proc_handler = proc_dointvec |
4813 | }, |
4814 | #endif |
4815 | { } |
4816 | }; |
4817 | |
4818 | static int __init init_signal_sysctls(void) |
4819 | { |
	register_sysctl_init("debug", signal_debug_table);
4821 | return 0; |
4822 | } |
4823 | early_initcall(init_signal_sysctls); |
4824 | #endif /* CONFIG_SYSCTL */ |
4825 | |
4826 | void __init signals_init(void) |
4827 | { |
4828 | siginfo_buildtime_checks(); |
4829 | |
4830 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT); |
4831 | } |
4832 | |
4833 | #ifdef CONFIG_KGDB_KDB |
4834 | #include <linux/kdb.h> |
4835 | /* |
4836 | * kdb_send_sig - Allows kdb to send signals without exposing |
4837 | * signal internals. This function checks if the required locks are |
4838 | * available before calling the main signal code, to avoid kdb |
4839 | * deadlocks. |
4840 | */ |
4841 | void kdb_send_sig(struct task_struct *t, int sig) |
4842 | { |
4843 | static struct task_struct *kdb_prev_t; |
4844 | int new_t, ret; |
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
4849 | return; |
4850 | } |
4851 | new_t = kdb_prev_t != t; |
4852 | kdb_prev_t = t; |
4853 | if (!task_is_running(t) && new_t) { |
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
4861 | return; |
4862 | } |
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4870 | } |
4871 | #endif /* CONFIG_KGDB_KDB */ |
4872 | |