// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/sched/rt.h>
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static bool disable_onoff_at_boot;
module_param(disable_onoff_at_boot, bool, 0444);

static bool ftrace_dump_at_shutdown;
module_param(ftrace_dump_at_shutdown, bool, 0444);

static int verbose_sleep_frequency;
module_param(verbose_sleep_frequency, int, 0444);

static int verbose_sleep_duration = 1;
module_param(verbose_sleep_duration, int, 0444);

static int random_shuffle;
module_param(random_shuffle, int, 0444);

static char *torture_type;
static int verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

static atomic_t verbose_sleep_counter;

/*
 * Sleep if needed from VERBOSE_TOROUT*().
 */
void verbose_torout_sleep(void)
{
	if (verbose_sleep_frequency > 0 &&
	    verbose_sleep_duration > 0 &&
	    !(atomic_inc_return(&verbose_sleep_counter) % verbose_sleep_frequency))
		schedule_timeout_uninterruptible(verbose_sleep_duration);
}
EXPORT_SYMBOL_GPL(verbose_torout_sleep);

/*
 * Schedule a high-resolution-timer sleep in nanoseconds, with a 32-bit
 * nanosecond random fuzz.  This function and its friends desynchronize
 * testing from the timer wheel.
 */
int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
			 struct torture_random_state *trsp)
{
	ktime_t hto = baset_ns;

	if (trsp)
		hto += torture_random(trsp) % fuzzt_ns;
	set_current_state(TASK_IDLE);
	return schedule_hrtimeout(&hto, mode);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);

/*
 * Schedule a high-resolution-timer sleep in microseconds, with a 32-bit
 * nanosecond (not microsecond!) random fuzz.
 */
int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp)
{
	ktime_t baset_ns = baset_us * NSEC_PER_USEC;

	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_us);

/*
 * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit
 * microsecond (not millisecond!) random fuzz.
 */
int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp)
{
	ktime_t baset_ns = baset_ms * NSEC_PER_MSEC;
	u32 fuzzt_ns;

	if ((u32)~0U / NSEC_PER_USEC < fuzzt_us)
		fuzzt_ns = (u32)~0U;
	else
		fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);

/*
 * Schedule a high-resolution-timer sleep in jiffies, with an
 * implied one-jiffy random fuzz.  This is intended to replace calls to
 * schedule_timeout_interruptible() and friends.
 */
int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
{
	ktime_t baset_ns = jiffies_to_nsecs(baset_j);

	return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), HRTIMER_MODE_REL, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);

/*
 * Schedule a high-resolution-timer sleep in seconds, with a 32-bit
 * millisecond (not second!) random fuzz.
 */
int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp)
{
	ktime_t baset_ns = baset_s * NSEC_PER_SEC;
	u32 fuzzt_ns;

	if ((u32)~0U / NSEC_PER_MSEC < fuzzt_ms)
		fuzzt_ns = (u32)~0U;
	else
		fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
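
/*
 * For example (hypothetical client code, not part of this file), a
 * torture kthread can replace schedule_timeout_interruptible(HZ) with
 * a fuzzed high-resolution sleep using the helpers above:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *
 *	torture_hrtimeout_jiffies(HZ, &rand);	// ~HZ jiffies, one-jiffy fuzz.
 *	torture_hrtimeout_s(1, 500, &rand);	// ~1 s, up to 500 ms of fuzz.
 */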

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled, otherwise this code does nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

static int torture_online_cpus = NR_CPUS;

/*
 * Some torture testing leverages confusion as to the number of online
 * CPUs.  This function returns the torture-testing view of this number,
 * which allows torture tests to load-balance appropriately.
 */
int torture_num_online_cpus(void)
{
	return READ_ONCE(torture_online_cpus);
}
EXPORT_SYMBOL_GPL(torture_num_online_cpus);
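
/*
 * For example (hypothetical client code), a test can partition work
 * using the torture-testing CPU count rather than num_online_cpus(),
 * so that in-flight hotplug operations do not skew the split:
 *
 *	int target_cpu = myid % torture_num_online_cpus();
 */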

/*
 * Attempt to take a CPU offline.  Return false if the CPU is already
 * offline or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;	/* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();
		(*n_offl_successes)++;
		delta = jiffies - starttime;
		*sum_offl += delta;
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
		WRITE_ONCE(torture_online_cpus, torture_online_cpus - 1);
		WARN_ON_ONCE(torture_online_cpus <= 0);
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);
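
/*
 * For example (hypothetical client code), a caller maintaining its own
 * hotplug statistics might invoke this as follows:
 *
 *	static long n_attempts, n_successes;
 *	static unsigned long sum_jiffies;
 *	static int min_jiffies = -1, max_jiffies;
 *
 *	torture_offline(cpu, &n_attempts, &n_successes,
 *			&sum_jiffies, &min_jiffies, &max_jiffies);
 */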

/*
 * Attempt to bring a CPU online.  Return false if the CPU is already
 * online or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;
		*sum_onl += delta;
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
		WRITE_ONCE(torture_online_cpus, torture_online_cpus + 1);
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);

/*
 * Get everything online at the beginnings and ends of tests.
 */
static void torture_online_all(char *phase)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		ret = add_cpu(cpu);
		if (ret && verbose) {
			pr_alert("%s" TORTURE_FLAG
				 "%s: %s online %d: errno %d\n",
				 __func__, phase, torture_type, cpu, ret);
		}
	}
}

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	torture_online_all("Initial");
	if (maxcpu == 0) {
		VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
		goto stop;
	}

	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		torture_hrtimeout_jiffies(onoff_holdoff, &rand);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
			torture_hrtimeout_jiffies(HZ / 10, &rand);
			continue;
		}
		cpu = torture_random(&rand) % (maxcpu + 1);
		if (!torture_offline(cpu,
				     &n_offline_attempts, &n_offline_successes,
				     &sum_offline, &min_offline, &max_offline))
			torture_online(cpu,
				       &n_online_attempts, &n_online_successes,
				       &sum_online, &min_online, &max_online);
		torture_hrtimeout_jiffies(onoff_interval, &rand);
	}

stop:
	torture_kthread_stopping("torture_onoff");
	torture_online_all("Final");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	onoff_f = f;
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
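
/*
 * For example (hypothetical client code), to hold off hotplug testing
 * for 30 seconds and then attempt an operation every HZ/10 jiffies,
 * with no per-offline callback (both intervals are in jiffies):
 *
 *	ret = torture_onoff_init(30 * HZ, HZ / 10, NULL);
 */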

/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock() + raw_smp_processor_id();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
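
/*
 * For example (hypothetical client code), each kthread typically keeps
 * its own generator state on the stack:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *
 *	if (!(torture_random(&rand) % 10))
 *		do_rare_operation();	// Hypothetical, ~10% of the time.
 */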

/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};

static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(struct torture_random_state *trp)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	cpus_read_lock();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		cpus_read_unlock();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l) {
		if (!random_shuffle || torture_random(trp) & 0x1)
			set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	}
	mutex_unlock(&shuffle_task_mutex);

	cpus_read_unlock();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks.  This is
 * meant to test RCU's support for such tickless-idle CPUs.
 */
static int torture_shuffle(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		torture_hrtimeout_jiffies(shuffle_interval, &rand);
		torture_shuffle_tasks(&rand);
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;

	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
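
/*
 * For example (hypothetical client code), to shuffle registered tasks
 * every three seconds:
 *
 *	ret = torture_shuffle_init(3 * HZ);
 */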

/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static struct task_struct *shutdown_task;
static ktime_t shutdown_time;		/* time to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	if (ftrace_dump_at_shutdown)
		rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	torture_shutdown_hook = cleanup;
	if (ssecs > 0) {
		shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
		return torture_create_kthread(torture_shutdown, NULL,
					      shutdown_task);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
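
/*
 * For example (hypothetical client code), to power the system off five
 * minutes into the run, calling my_cleanup() first:
 *
 *	ret = torture_shutdown_init(300, my_cleanup);
 */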

/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static ktime_t stutter_till_abs_time;
static int stutter;
static int stutter_gap;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
bool stutter_wait(const char *title)
{
	bool ret = false;
	ktime_t till_ns;

	cond_resched_tasks_rcu_qs();
	till_ns = READ_ONCE(stutter_till_abs_time);
	if (till_ns && ktime_before(ktime_get(), till_ns)) {
		torture_hrtimeout_ns(till_ns, 0, HRTIMER_MODE_ABS, NULL);
		ret = true;
	}
	torture_shutdown_absorb(title);
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);
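
/*
 * For example (hypothetical client code), a torture kthread's main loop
 * typically polls stutter_wait() once per pass:
 *
 *	do {
 *		do_one_torture_operation();	// Hypothetical test work.
 *		stutter_wait("my_torture_task");
 *	} while (!torture_must_stop());
 *	torture_kthread_stopping("my_torture_task");
 */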

/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	ktime_t till_ns;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			till_ns = ktime_add_ns(ktime_get(),
					       jiffies_to_nsecs(stutter));
			WRITE_ONCE(stutter_till_abs_time, till_ns);
			torture_hrtimeout_jiffies(stutter - 1, NULL);
		}
		if (!torture_must_stop())
			torture_hrtimeout_jiffies(stutter_gap, NULL);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;
	stutter_gap = sgap;
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);
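
/*
 * For example (hypothetical client code), to pause all stutter_wait()
 * callers for about five seconds out of every ten (both arguments are
 * in jiffies):
 *
 *	ret = torture_stutter_init(5 * HZ, 5 * HZ);
 */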

/*
 * Cleanup after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

static void
torture_print_module_parms(void)
{
	pr_alert("torture module --- %s:  disable_onoff_at_boot=%d ftrace_dump_at_shutdown=%d verbose_sleep_frequency=%d verbose_sleep_duration=%d random_shuffle=%d\n",
		 torture_type, disable_onoff_at_boot, ftrace_dump_at_shutdown, verbose_sleep_frequency, verbose_sleep_duration, random_shuffle);
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 */
bool torture_init_begin(char *ttype, int v)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("%s: Refusing %s init: %s running.\n",
			 __func__, ttype, torture_type);
		pr_alert("%s: One torture test at a time!\n", __func__);
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	torture_print_module_parms();
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);
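
/*
 * For example (hypothetical client code), a client module's init
 * function brackets its setup with torture_init_begin() and
 * torture_init_end():
 *
 *	if (!torture_init_begin("my_torture", verbose))
 *		return -EBUSY;
 *	// ...create kthreads, register callbacks...
 *	torture_init_end();
 */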

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);

/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected, otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Both torture_cleanup_begin() and torture_cleanup_end() must be paired,
 * in order to correctly perform the cleanup.  They are separated because
 * kthreads may still need to reference torture_type, which is therefore
 * set to NULL only after all other cleanup has completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);
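
/*
 * For example (hypothetical client code), a client module's exit
 * function pairs the two cleanup calls around its own teardown:
 *
 *	if (torture_cleanup_begin())
 *		return;
 *	// ...stop this module's own kthreads, free its resources...
 *	torture_cleanup_end();
 */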

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "%s is stopping", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(HZ / 20);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp))
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_create(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		TOROUT_ERRSTRING(f);
		*tp = NULL;
		return ret;
	}

	if (cbf)
		cbf(*tp);

	wake_up_process(*tp);	// Process is sleeping, so ordering provided.
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
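
/*
 * Clients normally invoke this via the torture_create_kthread() macro
 * in <linux/torture.h>, which generates the message strings from the
 * function name.  For example (hypothetical client code):
 *
 *	static struct task_struct *my_task;
 *
 *	ret = torture_create_kthread(my_torture_loop, NULL, my_task);
 */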

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);