1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_SCHED_SIGNAL_H |
3 | #define _LINUX_SCHED_SIGNAL_H |
4 | |
5 | #include <linux/rculist.h> |
6 | #include <linux/signal.h> |
7 | #include <linux/sched.h> |
8 | #include <linux/sched/jobctl.h> |
9 | #include <linux/sched/task.h> |
10 | #include <linux/cred.h> |
11 | #include <linux/refcount.h> |
12 | |
13 | /* |
14 | * Types defining task->signal and task->sighand and APIs using them: |
15 | */ |
16 | |
17 | struct sighand_struct { |
18 | refcount_t count; |
19 | struct k_sigaction action[_NSIG]; |
20 | spinlock_t siglock; |
21 | wait_queue_head_t signalfd_wqh; |
22 | }; |
23 | |
24 | /* |
25 | * Per-process accounting stats: |
26 | */ |
27 | struct pacct_struct { |
28 | int ac_flag; |
29 | long ac_exitcode; |
30 | unsigned long ac_mem; |
31 | u64 ac_utime, ac_stime; |
32 | unsigned long ac_minflt, ac_majflt; |
33 | }; |
34 | |
35 | struct cpu_itimer { |
36 | u64 expires; |
37 | u64 incr; |
38 | }; |
39 | |
40 | /* |
41 | * This is the atomic variant of task_cputime, which can be used for |
42 | * storing and updating task_cputime statistics without locking. |
43 | */ |
44 | struct task_cputime_atomic { |
45 | atomic64_t utime; |
46 | atomic64_t stime; |
47 | atomic64_t sum_exec_runtime; |
48 | }; |
49 | |
50 | #define INIT_CPUTIME_ATOMIC \ |
51 | (struct task_cputime_atomic) { \ |
52 | .utime = ATOMIC64_INIT(0), \ |
53 | .stime = ATOMIC64_INIT(0), \ |
54 | .sum_exec_runtime = ATOMIC64_INIT(0), \ |
	}

/**
57 | * struct thread_group_cputimer - thread group interval timer counts |
58 | * @cputime_atomic: atomic thread group interval timers. |
59 | * @running: true when there are timers running and |
60 | * @cputime_atomic receives updates. |
61 | * @checking_timer: true when a thread in the group is in the |
62 | * process of checking for thread group timers. |
63 | * |
64 | * This structure contains the version of task_cputime, above, that is |
65 | * used for thread group CPU timer calculations. |
66 | */ |
67 | struct thread_group_cputimer { |
68 | struct task_cputime_atomic cputime_atomic; |
69 | bool running; |
70 | bool checking_timer; |
71 | }; |
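
/*
 * Example (illustrative sketch, not part of this header): because the
 * group totals are atomic64_t, a reader may sample them without taking
 * ->siglock, e.g. for a struct signal_struct *sig:
 *
 *	u64 utime = atomic64_read(&sig->cputimer.cputime_atomic.utime);
 *	u64 stime = atomic64_read(&sig->cputimer.cputime_atomic.stime);
 */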
72 | |
73 | struct multiprocess_signals { |
74 | sigset_t signal; |
75 | struct hlist_node node; |
76 | }; |
77 | |
78 | /* |
79 | * NOTE! "signal_struct" does not have its own |
80 | * locking, because a shared signal_struct always |
81 | * implies a shared sighand_struct, so locking |
82 | * sighand_struct is always a proper superset of |
83 | * the locking of signal_struct. |
84 | */ |
85 | struct signal_struct { |
86 | refcount_t sigcnt; |
87 | atomic_t live; |
88 | int nr_threads; |
89 | struct list_head thread_head; |
90 | |
91 | wait_queue_head_t wait_chldexit; /* for wait4() */ |
92 | |
93 | /* current thread group signal load-balancing target: */ |
94 | struct task_struct *curr_target; |
95 | |
96 | /* shared signal handling: */ |
97 | struct sigpending shared_pending; |
98 | |
99 | /* For collecting multiprocess signals during fork */ |
100 | struct hlist_head multiprocess; |
101 | |
102 | /* thread group exit support */ |
103 | int group_exit_code; |
104 | /* overloaded: |
105 | * - notify group_exit_task when ->count is equal to notify_count |
106 | * - everyone except group_exit_task is stopped during signal delivery |
107 | * of fatal signals, group_exit_task processes the signal. |
108 | */ |
109 | int notify_count; |
110 | struct task_struct *group_exit_task; |
111 | |
112 | /* thread group stop support, overloads group_exit_code too */ |
113 | int group_stop_count; |
114 | unsigned int flags; /* see SIGNAL_* flags below */ |
115 | |
116 | /* |
117 | * PR_SET_CHILD_SUBREAPER marks a process, like a service |
118 | * manager, to re-parent orphan (double-forking) child processes |
119 | * to this process instead of 'init'. The service manager is |
120 | * able to receive SIGCHLD signals and is able to investigate |
121 | * the process until it calls wait(). All children of this |
122 | * process will inherit a flag if they should look for a |
123 | * child_subreaper process at exit. |
124 | */ |
125 | unsigned int is_child_subreaper:1; |
126 | unsigned int has_child_subreaper:1; |
127 | |
128 | #ifdef CONFIG_POSIX_TIMERS |
129 | |
130 | /* POSIX.1b Interval Timers */ |
131 | int posix_timer_id; |
132 | struct list_head posix_timers; |
133 | |
134 | /* ITIMER_REAL timer for the process */ |
135 | struct hrtimer real_timer; |
136 | ktime_t it_real_incr; |
137 | |
138 | /* |
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined as 0 and 1 respectively.
142 | */ |
143 | struct cpu_itimer it[2]; |
144 | |
145 | /* |
146 | * Thread group totals for process CPU timers. |
147 | * See thread_group_cputimer(), et al, for details. |
148 | */ |
149 | struct thread_group_cputimer cputimer; |
150 | |
151 | /* Earliest-expiration cache. */ |
152 | struct task_cputime cputime_expires; |
153 | |
154 | struct list_head cpu_timers[3]; |
155 | |
156 | #endif |
157 | |
158 | /* PID/PID hash table linkage. */ |
159 | struct pid *pids[PIDTYPE_MAX]; |
160 | |
161 | #ifdef CONFIG_NO_HZ_FULL |
162 | atomic_t tick_dep_mask; |
163 | #endif |
164 | |
165 | struct pid *tty_old_pgrp; |
166 | |
167 | /* boolean value for session group leader */ |
168 | int leader; |
169 | |
170 | struct tty_struct *tty; /* NULL if no tty */ |
171 | |
172 | #ifdef CONFIG_SCHED_AUTOGROUP |
173 | struct autogroup *autogroup; |
174 | #endif |
175 | /* |
176 | * Cumulative resource counters for dead threads in the group, |
177 | * and for reaped dead child processes forked by this group. |
178 | * Live threads maintain their own counters and add to these |
179 | * in __exit_signal, except for the group leader. |
180 | */ |
181 | seqlock_t stats_lock; |
182 | u64 utime, stime, cutime, cstime; |
183 | u64 gtime; |
184 | u64 cgtime; |
185 | struct prev_cputime prev_cputime; |
186 | unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; |
187 | unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; |
188 | unsigned long inblock, oublock, cinblock, coublock; |
	unsigned long maxrss, cmaxrss;
190 | struct task_io_accounting ioac; |
191 | |
192 | /* |
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
197 | */ |
198 | unsigned long long sum_sched_runtime; |
199 | |
200 | /* |
201 | * We don't bother to synchronize most readers of this at all, |
202 | * because there is no reader checking a limit that actually needs |
203 | * to get both rlim_cur and rlim_max atomically, and either one |
204 | * alone is a single word that can safely be read normally. |
205 | * getrlimit/setrlimit use task_lock(current->group_leader) to |
206 | * protect this instead of the siglock, because they really |
207 | * have no need to disable irqs. |
208 | */ |
209 | struct rlimit rlim[RLIM_NLIMITS]; |
210 | |
211 | #ifdef CONFIG_BSD_PROCESS_ACCT |
212 | struct pacct_struct pacct; /* per-process accounting information */ |
213 | #endif |
214 | #ifdef CONFIG_TASKSTATS |
215 | struct taskstats *stats; |
216 | #endif |
217 | #ifdef CONFIG_AUDIT |
218 | unsigned audit_tty; |
219 | struct tty_audit_buf *tty_audit_buf; |
220 | #endif |
221 | |
222 | /* |
223 | * Thread is the potential origin of an oom condition; kill first on |
224 | * oom |
225 | */ |
226 | bool oom_flag_origin; |
227 | short oom_score_adj; /* OOM kill score adjustment */ |
228 | short oom_score_adj_min; /* OOM kill score adjustment min value. |
229 | * Only settable by CAP_SYS_RESOURCE. */ |
230 | struct mm_struct *oom_mm; /* recorded mm when the thread group got |
231 | * killed by the oom killer */ |
232 | |
233 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
234 | * credential calculations |
					 * (notably ptrace) */
236 | } __randomize_layout; |
237 | |
238 | /* |
239 | * Bits in flags field of signal_struct. |
240 | */ |
241 | #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ |
242 | #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ |
243 | #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ |
244 | #define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ |
245 | /* |
246 | * Pending notifications to parent. |
247 | */ |
248 | #define SIGNAL_CLD_STOPPED 0x00000010 |
249 | #define SIGNAL_CLD_CONTINUED 0x00000020 |
250 | #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) |
251 | |
252 | #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ |
253 | |
254 | #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ |
255 | SIGNAL_STOP_CONTINUED) |
256 | |
257 | static inline void signal_set_stop_flags(struct signal_struct *sig, |
258 | unsigned int flags) |
259 | { |
260 | WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); |
261 | sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; |
262 | } |
263 | |
264 | /* If true, all threads except ->group_exit_task have pending SIGKILL */ |
265 | static inline int signal_group_exit(const struct signal_struct *sig) |
266 | { |
267 | return (sig->flags & SIGNAL_GROUP_EXIT) || |
268 | (sig->group_exit_task != NULL); |
269 | } |
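
/*
 * Example (sketch, assuming a task_struct *tsk): exit and coredump paths
 * typically test this under ->siglock to see whether a group-wide exit
 * is already underway:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	if (signal_group_exit(tsk->signal))
 *		exit_code = tsk->signal->group_exit_code;
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */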
270 | |
271 | extern void flush_signals(struct task_struct *); |
272 | extern void ignore_signals(struct task_struct *); |
273 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
274 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info); |
275 | |
276 | static inline int kernel_dequeue_signal(void) |
277 | { |
278 | struct task_struct *tsk = current; |
279 | kernel_siginfo_t __info; |
280 | int ret; |
281 | |
282 | spin_lock_irq(&tsk->sighand->siglock); |
283 | ret = dequeue_signal(tsk, &tsk->blocked, &__info); |
284 | spin_unlock_irq(&tsk->sighand->siglock); |
285 | |
286 | return ret; |
287 | } |
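
/*
 * Example (sketch): a kernel thread that has opted in to a signal with
 * allow_signal() can drain it once signal_pending() reports it:
 *
 *	allow_signal(SIGTERM);
 *	...
 *	if (signal_pending(current))
 *		kernel_dequeue_signal();
 */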
288 | |
289 | static inline void kernel_signal_stop(void) |
290 | { |
	spin_lock_irq(&current->sighand->siglock);
292 | if (current->jobctl & JOBCTL_STOP_DEQUEUED) |
293 | set_special_state(TASK_STOPPED); |
	spin_unlock_irq(&current->sighand->siglock);
295 | |
296 | schedule(); |
297 | } |
298 | #ifdef __ARCH_SI_TRAPNO |
299 | # define ___ARCH_SI_TRAPNO(_a1) , _a1 |
300 | #else |
301 | # define ___ARCH_SI_TRAPNO(_a1) |
302 | #endif |
303 | #ifdef __ia64__ |
304 | # define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 |
305 | #else |
306 | # define ___ARCH_SI_IA64(_a1, _a2, _a3) |
307 | #endif |
308 | |
309 | int force_sig_fault(int sig, int code, void __user *addr |
310 | ___ARCH_SI_TRAPNO(int trapno) |
311 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) |
312 | , struct task_struct *t); |
313 | int send_sig_fault(int sig, int code, void __user *addr |
314 | ___ARCH_SI_TRAPNO(int trapno) |
315 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) |
316 | , struct task_struct *t); |
317 | |
318 | int force_sig_mceerr(int code, void __user *, short, struct task_struct *); |
319 | int send_sig_mceerr(int code, void __user *, short, struct task_struct *); |
320 | |
321 | int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); |
322 | int force_sig_pkuerr(void __user *addr, u32 pkey); |
323 | |
324 | int force_sig_ptrace_errno_trap(int errno, void __user *addr); |
325 | |
326 | extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); |
327 | extern void force_sigsegv(int sig, struct task_struct *p); |
328 | extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *); |
329 | extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); |
330 | extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); |
331 | extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *, |
332 | const struct cred *); |
333 | extern int kill_pgrp(struct pid *pid, int sig, int priv); |
334 | extern int kill_pid(struct pid *pid, int sig, int priv); |
335 | extern __must_check bool do_notify_parent(struct task_struct *, int); |
336 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); |
337 | extern void force_sig(int, struct task_struct *); |
338 | extern int send_sig(int, struct task_struct *, int); |
339 | extern int zap_other_threads(struct task_struct *p); |
340 | extern struct sigqueue *sigqueue_alloc(void); |
341 | extern void sigqueue_free(struct sigqueue *); |
342 | extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); |
343 | extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); |
344 | |
345 | static inline int restart_syscall(void) |
346 | { |
347 | set_tsk_thread_flag(current, TIF_SIGPENDING); |
348 | return -ERESTARTNOINTR; |
349 | } |
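
/*
 * Example (sketch; try_acquire_resource() is a hypothetical helper): a
 * syscall that must drop everything and be transparently re-entered can
 * bail out like this:
 *
 *	if (!try_acquire_resource())
 *		return restart_syscall();
 */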
350 | |
351 | static inline int signal_pending(struct task_struct *p) |
352 | { |
353 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); |
354 | } |
355 | |
356 | static inline int __fatal_signal_pending(struct task_struct *p) |
357 | { |
358 | return unlikely(sigismember(&p->pending.signal, SIGKILL)); |
359 | } |
360 | |
361 | static inline int fatal_signal_pending(struct task_struct *p) |
362 | { |
363 | return signal_pending(p) && __fatal_signal_pending(p); |
364 | } |
365 | |
366 | static inline int signal_pending_state(long state, struct task_struct *p) |
367 | { |
368 | if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) |
369 | return 0; |
370 | if (!signal_pending(p)) |
371 | return 0; |
372 | |
373 | return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); |
374 | } |
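
/*
 * Example (sketch of the typical scheduler-side use): a task about to
 * block in a given sleep state stays runnable if a signal it would
 * react to is already pending:
 *
 *	if (signal_pending_state(state, current))
 *		... keep the task TASK_RUNNING instead of blocking ...
 */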
375 | |
376 | /* |
377 | * Reevaluate whether the task has signals pending delivery. |
378 | * Wake the task if so. |
379 | * This is required every time the blocked sigset_t changes. |
 * Callers must hold sighand->siglock.
381 | */ |
382 | extern void recalc_sigpending_and_wake(struct task_struct *t); |
383 | extern void recalc_sigpending(void); |
384 | extern void calculate_sigpending(void); |
385 | |
386 | extern void signal_wake_up_state(struct task_struct *t, unsigned int state); |
387 | |
388 | static inline void signal_wake_up(struct task_struct *t, bool resume) |
389 | { |
390 | signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); |
391 | } |
392 | static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) |
393 | { |
394 | signal_wake_up_state(t, resume ? __TASK_TRACED : 0); |
395 | } |
396 | |
397 | void task_join_group_stop(struct task_struct *task); |
398 | |
399 | #ifdef TIF_RESTORE_SIGMASK |
400 | /* |
401 | * Legacy restore_sigmask accessors. These are inefficient on |
402 | * SMP architectures because they require atomic operations. |
403 | */ |
404 | |
405 | /** |
406 | * set_restore_sigmask() - make sure saved_sigmask processing gets done |
407 | * |
408 | * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code |
409 | * will run before returning to user mode, to process the flag. For |
410 | * all callers, TIF_SIGPENDING is already set or it's no harm to set |
411 | * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the |
412 | * arch code will notice on return to user mode, in case those bits |
413 | * are scarce. We set TIF_SIGPENDING here to ensure that the arch |
414 | * signal code always gets run when TIF_RESTORE_SIGMASK is set. |
415 | */ |
416 | static inline void set_restore_sigmask(void) |
417 | { |
418 | set_thread_flag(TIF_RESTORE_SIGMASK); |
419 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
420 | } |
421 | static inline void clear_restore_sigmask(void) |
422 | { |
423 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
424 | } |
425 | static inline bool test_restore_sigmask(void) |
426 | { |
427 | return test_thread_flag(TIF_RESTORE_SIGMASK); |
428 | } |
429 | static inline bool test_and_clear_restore_sigmask(void) |
430 | { |
431 | return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); |
432 | } |
433 | |
434 | #else /* TIF_RESTORE_SIGMASK */ |
435 | |
436 | /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ |
437 | static inline void set_restore_sigmask(void) |
438 | { |
439 | current->restore_sigmask = true; |
440 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
441 | } |
442 | static inline void clear_restore_sigmask(void) |
443 | { |
444 | current->restore_sigmask = false; |
445 | } |
446 | static inline bool test_restore_sigmask(void) |
447 | { |
448 | return current->restore_sigmask; |
449 | } |
450 | static inline bool test_and_clear_restore_sigmask(void) |
451 | { |
452 | if (!current->restore_sigmask) |
453 | return false; |
454 | current->restore_sigmask = false; |
455 | return true; |
456 | } |
457 | #endif |
458 | |
459 | static inline void restore_saved_sigmask(void) |
460 | { |
461 | if (test_and_clear_restore_sigmask()) |
		__set_current_blocked(&current->saved_sigmask);
463 | } |
464 | |
465 | static inline sigset_t *sigmask_to_save(void) |
466 | { |
	sigset_t *res = &current->blocked;
468 | if (unlikely(test_restore_sigmask())) |
		res = &current->saved_sigmask;
470 | return res; |
471 | } |
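
/*
 * Example (sketch, loosely modelled on architecture signal-frame setup;
 * "frame" stands for an arch-specific rt_sigframe): the set returned by
 * sigmask_to_save() is what gets recorded in the signal frame so that
 * sigreturn can restore it, while restore_saved_sigmask() is called when
 * no signal ends up being delivered:
 *
 *	sigset_t *set = sigmask_to_save();
 *
 *	if (copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)))
 *		return -EFAULT;
 */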
472 | |
473 | static inline int kill_cad_pid(int sig, int priv) |
474 | { |
475 | return kill_pid(cad_pid, sig, priv); |
476 | } |
477 | |
478 | /* These can be the second arg to send_sig_info/send_group_sig_info. */ |
479 | #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) |
480 | #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) |
481 | |
482 | /* |
483 | * True if we are on the alternate signal stack. |
484 | */ |
485 | static inline int on_sig_stack(unsigned long sp) |
486 | { |
487 | /* |
488 | * If the signal stack is SS_AUTODISARM then, by construction, we |
489 | * can't be on the signal stack unless user code deliberately set |
490 | * SS_AUTODISARM when we were already on it. |
491 | * |
492 | * This improves reliability: if user state gets corrupted such that |
493 | * the stack pointer points very close to the end of the signal stack, |
494 | * then this check will enable the signal to be handled anyway. |
495 | */ |
496 | if (current->sas_ss_flags & SS_AUTODISARM) |
497 | return 0; |
498 | |
499 | #ifdef CONFIG_STACK_GROWSUP |
500 | return sp >= current->sas_ss_sp && |
501 | sp - current->sas_ss_sp < current->sas_ss_size; |
502 | #else |
503 | return sp > current->sas_ss_sp && |
504 | sp - current->sas_ss_sp <= current->sas_ss_size; |
505 | #endif |
506 | } |
507 | |
508 | static inline int sas_ss_flags(unsigned long sp) |
509 | { |
510 | if (!current->sas_ss_size) |
511 | return SS_DISABLE; |
512 | |
513 | return on_sig_stack(sp) ? SS_ONSTACK : 0; |
514 | } |
515 | |
516 | static inline void sas_ss_reset(struct task_struct *p) |
517 | { |
518 | p->sas_ss_sp = 0; |
519 | p->sas_ss_size = 0; |
520 | p->sas_ss_flags = SS_DISABLE; |
521 | } |
522 | |
523 | static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) |
524 | { |
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
526 | #ifdef CONFIG_STACK_GROWSUP |
527 | return current->sas_ss_sp; |
528 | #else |
529 | return current->sas_ss_sp + current->sas_ss_size; |
530 | #endif |
531 | return sp; |
532 | } |
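
/*
 * Example (sketch, assuming an arch-specific rt_sigframe and pt_regs):
 * architecture code typically picks the stack for a new signal frame
 * with sigsp() and then makes room for the frame:
 *
 *	unsigned long sp = sigsp(regs->sp, ksig);
 *	frame = (struct rt_sigframe __user *)((sp - sizeof(*frame)) & ~15UL);
 */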
533 | |
534 | extern void __cleanup_sighand(struct sighand_struct *); |
535 | extern void flush_itimer_signals(void); |
536 | |
537 | #define tasklist_empty() \ |
538 | list_empty(&init_task.tasks) |
539 | |
540 | #define next_task(p) \ |
541 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) |
542 | |
543 | #define for_each_process(p) \ |
544 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
545 | |
546 | extern bool current_is_single_threaded(void); |
547 | |
548 | /* |
549 | * Careful: do_each_thread/while_each_thread is a double loop so |
550 | * 'break' will not work as expected - use goto instead. |
551 | */ |
552 | #define do_each_thread(g, t) \ |
553 | for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do |
554 | |
555 | #define while_each_thread(g, t) \ |
556 | while ((t = next_thread(t)) != g) |
557 | |
558 | #define __for_each_thread(signal, t) \ |
559 | list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) |
560 | |
561 | #define for_each_thread(p, t) \ |
562 | __for_each_thread((p)->signal, t) |
563 | |
564 | /* Careful: this is a double loop, 'break' won't work as expected. */ |
565 | #define for_each_process_thread(p, t) \ |
566 | for_each_process(p) for_each_thread(p, t) |
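
/*
 * Example (sketch): walking every thread in the system requires either
 * read_lock(&tasklist_lock) or an RCU read-side critical section, and
 * the body must not sleep under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		... inspect t ...
 *	rcu_read_unlock();
 */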
567 | |
568 | typedef int (*proc_visitor)(struct task_struct *p, void *data); |
569 | void walk_process_tree(struct task_struct *top, proc_visitor, void *); |
570 | |
571 | static inline |
572 | struct pid *task_pid_type(struct task_struct *task, enum pid_type type) |
573 | { |
574 | struct pid *pid; |
575 | if (type == PIDTYPE_PID) |
576 | pid = task_pid(task); |
577 | else |
578 | pid = task->signal->pids[type]; |
579 | return pid; |
580 | } |
581 | |
582 | static inline struct pid *task_tgid(struct task_struct *task) |
583 | { |
584 | return task->signal->pids[PIDTYPE_TGID]; |
585 | } |
586 | |
587 | /* |
588 | * Without tasklist or RCU lock it is not safe to dereference |
589 | * the result of task_pgrp/task_session even if task == current, |
590 | * we can race with another thread doing sys_setsid/sys_setpgid. |
591 | */ |
592 | static inline struct pid *task_pgrp(struct task_struct *task) |
593 | { |
594 | return task->signal->pids[PIDTYPE_PGID]; |
595 | } |
596 | |
597 | static inline struct pid *task_session(struct task_struct *task) |
598 | { |
599 | return task->signal->pids[PIDTYPE_SID]; |
600 | } |
601 | |
602 | static inline int get_nr_threads(struct task_struct *tsk) |
603 | { |
604 | return tsk->signal->nr_threads; |
605 | } |
606 | |
607 | static inline bool thread_group_leader(struct task_struct *p) |
608 | { |
609 | return p->exit_signal >= 0; |
610 | } |
611 | |
/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
618 | static inline bool has_group_leader_pid(struct task_struct *p) |
619 | { |
620 | return task_pid(p) == task_tgid(p); |
621 | } |
622 | |
623 | static inline |
624 | bool same_thread_group(struct task_struct *p1, struct task_struct *p2) |
625 | { |
626 | return p1->signal == p2->signal; |
627 | } |
628 | |
629 | static inline struct task_struct *next_thread(const struct task_struct *p) |
630 | { |
631 | return list_entry_rcu(p->thread_group.next, |
632 | struct task_struct, thread_group); |
633 | } |
634 | |
635 | static inline int thread_group_empty(struct task_struct *p) |
636 | { |
637 | return list_empty(&p->thread_group); |
638 | } |
639 | |
640 | #define delay_group_leader(p) \ |
641 | (thread_group_leader(p) && !thread_group_empty(p)) |
642 | |
643 | extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
644 | unsigned long *flags); |
645 | |
646 | static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, |
647 | unsigned long *flags) |
648 | { |
649 | struct sighand_struct *ret; |
650 | |
651 | ret = __lock_task_sighand(tsk, flags); |
652 | (void)__cond_lock(&tsk->sighand->siglock, ret); |
653 | return ret; |
654 | } |
655 | |
656 | static inline void unlock_task_sighand(struct task_struct *tsk, |
657 | unsigned long *flags) |
658 | { |
659 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); |
660 | } |
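
/*
 * Example (sketch): this pair is the usual way to take a *remote* task's
 * siglock, since ->sighand may become NULL once the task exits; per the
 * note above signal_struct, holding it also protects ->signal:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... tsk->signal fields are stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */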
661 | |
662 | static inline unsigned long task_rlimit(const struct task_struct *tsk, |
663 | unsigned int limit) |
664 | { |
665 | return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); |
666 | } |
667 | |
668 | static inline unsigned long task_rlimit_max(const struct task_struct *tsk, |
669 | unsigned int limit) |
670 | { |
671 | return READ_ONCE(tsk->signal->rlim[limit].rlim_max); |
672 | } |
673 | |
674 | static inline unsigned long rlimit(unsigned int limit) |
675 | { |
676 | return task_rlimit(current, limit); |
677 | } |
678 | |
679 | static inline unsigned long rlimit_max(unsigned int limit) |
680 | { |
681 | return task_rlimit_max(current, limit); |
682 | } |
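
/*
 * Example (sketch): checking the calling task's soft limit:
 *
 *	if (new_size > rlimit(RLIMIT_STACK))
 *		return -ENOMEM;
 */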
683 | |
684 | #endif /* _LINUX_SCHED_SIGNAL_H */ |
685 | |