/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>

struct task_struct;
struct rusage;
union thread_union;

/*
 * Protects the list of all tasks and the parent/child/sibling links
 * between them: fork() and exit() take it for writing when linking or
 * unlinking a task, readers take it while traversing the task list.
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
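
/*
 * Illustrative sketch (not part of this header): a typical read-side user
 * walks the task list under read_lock(&tasklist_lock), e.g. with
 * for_each_process() from <linux/sched/signal.h>.  The visit() callback is
 * hypothetical.
 */
#if 0
static void example_walk_tasks(void (*visit)(struct task_struct *))
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	for_each_process(p)
		visit(p);
	read_unlock(&tasklist_lock);
}
#endif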

extern union thread_union init_thread_union;
extern struct task_struct init_task;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);

extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct *p);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/*
 * Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C.
 */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
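
/*
 * Illustrative sketch (hypothetical architecture, not part of this header):
 * with CONFIG_HAVE_COPY_THREAD_TLS selected, the new task's TLS pointer
 * arrives as the explicit @tls argument instead of being fished out of the
 * parent's pt_regs.  The register name "tp" below is made up.
 */
#if 0
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		    unsigned long arg, struct task_struct *p,
		    unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

	*childregs = *current_pt_regs();
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = tls;	/* hypothetical TLS register */
	return 0;
}
#endif
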
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
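
/*
 * Illustrative sketch (not part of this header): kernel_thread() is the
 * low-level way to spawn a kernel thread from an existing one; most code
 * should use the kthread_create()/kthread_run() API instead.  The thread
 * function and helper below are hypothetical.
 */
#if 0
static int example_thread_fn(void *data)
{
	pr_info("example kernel thread running\n");
	return 0;
}

static void example_spawn_and_wait(void)
{
	pid_t pid = kernel_thread(example_thread_fn, NULL, CLONE_FS | SIGCHLD);

	if (pid > 0)
		kernel_wait4(pid, NULL, 0, NULL);
}
#endif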

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (refcount_dec_and_test(&t->usage))
		__put_task_struct(t);
}
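
/*
 * Illustrative sketch (not part of this header): take a reference on a task
 * before using it outside the lock or RCU section that made the pointer
 * stable, and drop it with put_task_struct() when done.  The helper below is
 * hypothetical.
 */
#if 0
static void example_use_task(struct task_struct *tsk)
{
	get_task_struct(tsk);
	/* ... may sleep; tsk cannot be freed underneath us ... */
	pr_info("comm=%s pid=%d\n", tsk->comm, task_pid_nr(tsk));
	put_task_struct(tsk);
}
#endif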

struct task_struct *task_rcu_dereference(struct task_struct **ptask);
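
/*
 * Illustrative sketch (not part of this header): task_rcu_dereference() lets
 * an RCU reader safely dereference a task pointer whose target may be
 * concurrently exiting and freed; the result is only valid inside the RCU
 * read-side critical section unless a reference is taken.
 */
#if 0
static void example_peek_task(struct task_struct **ptask)
{
	struct task_struct *p;

	rcu_read_lock();
	p = task_rcu_dereference(ptask);
	if (p)
		pr_info("currently: %s\n", p->comm);
	rcu_read_unlock();
}
#endif
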

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = 0;
	/* Handle dynamically sized thread_struct. */
	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
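
/*
 * Illustrative sketch (hypothetical architecture, not part of this header):
 * an architecture that selects CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
 * provides its own arch_thread_struct_whitelist(), typically exposing only
 * the FPU/SIMD state that really is copied to userspace.  The field name
 * "fpstate" is made up.
 */
#if 0
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fpstate);
	*size = sizeof_field(struct thread_struct, fpstate);
}
#endif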

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif
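
/*
 * Illustrative sketch (not part of this header): when the stack is
 * vmalloc()-backed (CONFIG_VMAP_STACK), task_stack_vm_area() exposes the
 * backing vm_struct, e.g. so its pages can be inspected or accounted.
 */
#if 0
static unsigned int example_stack_pages(struct task_struct *tsk)
{
	struct vm_struct *vm = task_stack_vm_area(tsk);

	return vm ? vm->nr_pages : 0;
}
#endif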

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs,
 * pins the final release of task.io_context, and protects ->cpuset,
 * ->cgroup.subsys[] and ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
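
/*
 * Illustrative sketch (not part of this header): task_lock() is what makes a
 * snapshot of fields like ->comm stable, e.g. the pattern used by
 * get_task_comm().  The helper below is hypothetical.
 */
#if 0
static void example_copy_comm(char *buf, size_t size, struct task_struct *p)
{
	task_lock(p);
	strscpy(buf, p->comm, size);
	task_unlock(p);
}
#endif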

#endif /* _LINUX_SCHED_TASK_H */