/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and, depending on the new value, an operation can fail, potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
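
/*
 * Illustrative only: a minimal sketch of the retry pattern that
 * read_mems_allowed_begin()/read_mems_allowed_retry() are designed for.
 * try_alloc_page() stands in for the real allocation step and is a
 * hypothetical placeholder, not an actual kernel API.
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_page(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */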

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
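
/*
 * Illustrative only: kernel threads that should be able to allocate from any
 * node with memory typically widen their nodemask with a call along these
 * lines (kthreadd is one such caller); shown here purely as a usage sketch.
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 */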

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */