/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
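
/*
 * The two enums above use an X-macro: <linux/sched/sd_flags.h> lists every
 * flag as SD_FLAG(name, meta_flags) and is included twice with different
 * SD_FLAG() definitions. For a hypothetical entry SD_FLAG(SD_EXAMPLE, ...)
 * this expands roughly to:
 *
 *	enum { __SD_EXAMPLE, ..., __SD_FLAG_CNT };	(flag indexes)
 *	enum { SD_EXAMPLE = 1 << __SD_EXAMPLE, ... };	(flag bits)
 *
 * so each SD_* flag gets a distinct power-of-two value and __SD_FLAG_CNT
 * is the total number of flags.
 */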

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_CLUSTER | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif
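
/*
 * The helpers above are sched_domain_flags_f callbacks for the topology
 * levels defined further down: each returns the SD_* flags that hold at
 * that level, e.g. SMT siblings share both CPU capacity and the LLC,
 * while plain multi-core (MC) siblings share only the LLC.
 */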

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT (struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
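
/*
 * Usage sketch (hypothetical caller): attributes travel alongside the
 * domain masks when repartitioning:
 *
 *	struct sched_domain_attr attr = SD_ATTR_INIT;
 *
 *	partition_sched_domains(1, doms, &attr);
 *
 * A relax_domain_level of -1 requests the system default; cpusets can
 * lower it to limit how far newidle/wakeup balancing searches.
 */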

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
	int		nr_idle_scan;
};
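
/*
 * One instance of the struct above is shared (hence the refcount) by all
 * CPUs whose domain spans the same CPUs, typically the LLC level:
 * nr_busy_cpus and has_idle_cores feed the idle-CPU/idle-core search on
 * the wakeup path.
 */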

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;		/* the balancing groups of the domain */
	unsigned long min_interval;		/* Minimum balance interval ms */
	unsigned long max_interval;		/* Maximum balance interval ms */
	unsigned int busy_factor;		/* less balancing by factor if busy */
	unsigned int imbalance_pct;		/* No balance until over watermark */
	unsigned int cache_nice_tries;		/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;		/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
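
/*
 * Illustrative walk over a domain's span (inspect_cpu() is a made-up
 * helper; real callers traverse sched domains under rcu_read_lock()):
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		inspect_cpu(cpu);
 */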

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
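
/*
 * A minimal sketch of the intended pairing (error handling elided; in the
 * real callers, e.g. the cpuset code, partition_sched_domains() keeps the
 * new doms array and frees the previous one itself, so treat this purely
 * as an illustration):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	cpumask_copy(doms[0], cpu_active_mask);
 *	partition_sched_domains(1, doms, NULL);
 */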

bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data	    data;
#ifdef CONFIG_SCHED_DEBUG
	char		    *name;
#endif
};

extern void __init set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
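
/*
 * Sketch of how an architecture installs a custom table, modelled on the
 * kernel's default_topology (the table and its name are illustrative):
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * Each level provides a mask callback, an optional sd_flags callback and
 * a debug name; the array is terminated by a NULL mask entry.
 */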

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_resources(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
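
/*
 * Architectures provide their own scaling by defining the macro before
 * this fallback is seen; for instance arm64 maps it to the arch_topology
 * driver's per-CPU capacity:
 *
 *	#define arch_scale_cpu_capacity topology_get_cpu_scale
 *
 * The fallback above reports full capacity (SCHED_CAPACITY_SCALE) for
 * every CPU, i.e. a symmetric system.
 */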

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_thermal_pressure
static __always_inline
void arch_update_thermal_pressure(const struct cpumask *cpus,
				  unsigned long capped_frequency)
{ }
#endif

#ifndef arch_scale_freq_ref
static __always_inline
unsigned int arch_scale_freq_ref(int cpu)
{
	return 0;
}
#endif
static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */