// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
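
/*
 * Worked example (illustrative numbers, not taken from a particular
 * driver): a hypothetical 24 MHz counter converted to nanoseconds over
 * a guaranteed range of 600 seconds:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 24000000, NSEC_PER_SEC, 600);
 *
 * yields mult = 699050667 and shift = 24, so that
 *
 *	ns = ((u64)cycles * mult) >> shift;
 *
 * e.g. 24 cycles (one microsecond) convert to exactly 1000 ns.
 */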

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

/*
 * Interval: 0.5sec.
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))

/*
 * Threshold: 0.0312s, when doubled: 0.0625s.
 * Also a default for cs->uncertainty_margin when registering clocks.
 */
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 *
 * The default of 500 parts per million is based on NTP's limits.
 * If a clocksource is good enough for NTP, it is good enough for us!
 */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC	CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#else
#define MAX_SKEW_USEC	(125 * WATCHDOG_INTERVAL / HZ)
#endif

#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
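
/*
 * A sketch of the default arithmetic (assuming the Kconfig override is
 * not set): WATCHDOG_INTERVAL is HZ/2 jiffies, i.e. 0.5 s, so
 * MAX_SKEW_USEC = 125 * (HZ / 2) / HZ, which integer-truncates to
 * 62 usec for the common HZ values (100, 250, 1000). That gives
 * WATCHDOG_MAX_SKEW = 62 * NSEC_PER_USEC = 62000 ns per 0.5 s
 * watchdog interval.
 */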

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;
static int64_t watchdog_max_interval;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run() fails, the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int verify_n_cpus = 8;
module_param(verify_n_cpus, int, 0644);

enum wd_read_status {
	WD_READ_SUCCESS,
	WD_READ_UNSTABLE,
	WD_READ_SKIP
};

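/*
 * A sketch of the read sequence used below. Each sample brackets the
 * clocksource read with two watchdog reads, plus one extra watchdog
 * read to judge how busy the system is:
 *
 *	*wdnow -> *csnow -> wd_end -> wd_end2
 *
 * wd_delay (wdnow..wd_end) bounds how long the clocksource read took;
 * it must stay within WATCHDOG_MAX_SKEW for the sample to be usable.
 * wd_seq_delay (wd_end..wd_end2) measures two back-to-back watchdog
 * reads; if even that exceeds WATCHDOG_MAX_SKEW/2, the system is
 * considered too busy and the skew test is skipped.
 */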
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries, max_retries;
	u64 wd_end, wd_end2, wd_delta;
	int64_t wd_delay, wd_seq_delay;

	max_retries = clocksource_get_max_watchdog_retry();
	for (nretries = 0; nretries <= max_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		wd_end2 = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return WD_READ_SUCCESS;
		}

		/*
		 * Now compute the delay between the consecutive watchdog
		 * reads to see if there is too much external interference,
		 * which would cause a significant delay in reading both the
		 * clocksource and the watchdog.
		 *
		 * If the consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
		 * report system busy, reinit the watchdog and skip the current
		 * watchdog test.
		 */
		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
			goto skip_test;
	}

	pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
		smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
	return WD_READ_UNSTABLE;

skip_test:
	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
		smp_processor_id(), watchdog->name, wd_seq_delay);
	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
		cs->name, wd_delay);
	return WD_READ_SKIP;
}

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;

static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_first(cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next(cpu, cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = get_random_u32_below(nr_cpu_ids);
		cpu = cpumask_next(cpu - 1, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}

	/* Don't verify ourselves. */
	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
}

static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	if (verify_n_cpus == 0)
		return;
	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	cpus_read_lock();
	preempt_disable();
	clocksource_verify_choose_cpus();
	if (cpumask_empty(&cpus_chosen)) {
		preempt_enable();
		cpus_read_unlock();
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
		return;
	}
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	for_each_cpu(cpu, &cpus_chosen) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	cpus_read_unlock();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
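
/*
 * Usage note (a sketch, based on how the watchdog below consumes this):
 * a clocksource that can diverge between CPUs sets
 * CLOCK_SOURCE_VERIFY_PERCPU in its flags (the x86 TSC does this), and
 * clocksource_watchdog_kthread() then runs the per-CPU check whenever
 * that clocksource has been marked unstable:
 *
 *	if (curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
 *		clocksource_verify_percpu(curr_clocksource);
 *
 * The number of CPUs checked is bounded by the verify_n_cpus module
 * parameter (default 8; a negative value means all online CPUs, 0
 * disables the check).
 */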

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}


static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec, interval;
	int next_cpu, reset_pending;
	struct clocksource *cs;
	enum wd_read_status read_ret;
	unsigned long extra_wait = 0;
	u32 md;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);

		if (read_ret == WD_READ_UNSTABLE) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/*
		 * When WD_READ_SKIP is returned, it means the system is likely
		 * under very heavy load, where the latency of reading the
		 * watchdog/clocksource is very high and affects the accuracy
		 * of the watchdog check. So give the system some space and
		 * suspend the watchdog check for 5 minutes.
		 */
		if (read_ret == WD_READ_SKIP) {
			/*
			 * As the watchdog timer will be suspended, and
			 * cs->last could keep unchanged for 5 minutes, reset
			 * the counters.
			 */
			clocksource_reset_watchdog();
			extra_wait = HZ * 300;
			break;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/*
		 * The processing of timer softirqs can get delayed (usually
		 * on account of ksoftirqd not getting to run in a timely
		 * manner), which causes the watchdog interval to stretch.
		 * Skew detection may fail for longer watchdog intervals
		 * on account of fixed margins being used.
		 * Some clocksources, e.g. acpi_pm, cannot tolerate
		 * watchdog intervals longer than a few seconds.
		 */
		interval = max(cs_nsec, wd_nsec);
		if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
			if (system_state > SYSTEM_SCHEDULING &&
			    interval > 2 * watchdog_max_interval) {
				watchdog_max_interval = interval;
				pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
					cs_nsec, wd_nsec);
			}
			watchdog_timer.expires = jiffies;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			s64 cs_wd_msec;
			s64 wd_msec;
			u32 wd_rem;

			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn(" '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
			pr_warn(" '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, cs_nsec, csnow, cslast, cs->mask);
			cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
			wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
			pr_warn(" Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
				cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
			if (curr_clocksource == cs)
				pr_warn(" '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			else
				pr_warn(" No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm timer if not already pending: could race with concurrent
	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces that would stop it when the system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function saves the start cycle value of the suspend timer, used to
 * calculate the suspend time when the system resumes.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot CPUs are offline and interrupts
 * are disabled. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid reading the
	 * same value twice from the suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function calculates the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid reading the same value twice from the suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
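
/*
 * For instance (numbers continuing the illustrative 24 MHz example
 * above): with mult = 699050667, the 11% bound works out to
 * maxadj = 699050667 * 11 / 100 = 76895573.
 */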

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
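
/*
 * Worked example (same illustrative 24 MHz clocksource as above, with a
 * 32-bit mask): mult = 699050667, shift = 24, maxadj = 76895573.
 * ULLONG_MAX / (mult + maxadj) is ~2.4e10 cycles, which exceeds the
 * 32-bit mask, so max_cycles is clamped to 0xffffffff. Converting that
 * with (mult - maxadj) gives roughly 159 seconds, and the 50% safety
 * margin returns ~79 seconds as the maximum deferment time.
 */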

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 *
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
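
	/*
	 * For instance (assumed numbers): a 24 MHz clocksource registered
	 * via clocksource_register_hz() arrives here with scale = 1 and
	 * freq = 24000000. With a 32-bit mask, sec = 0xffffffff / 24000000
	 * = 178, so the mult/shift pair is sized for a guaranteed
	 * conversion range of 178 seconds.
	 */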

	/*
	 * If the uncertainty margin is not specified, calculate it.
	 * If both scale and freq are non-zero, calculate the clock
	 * period, but bound below at 2*WATCHDOG_MAX_SKEW. However,
	 * if either of scale or freq is zero, be very conservative and
	 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
	 * uncertainty margin. Allow stupidly small uncertainty margins
	 * to be specified by the caller for testing purposes, but warn
	 * to discourage production use of this capability.
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
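
	/*
	 * Continuing the assumed 24 MHz example: the clock period is
	 * NSEC_PER_SEC / 24000000 = 41 ns, well below the
	 * 2 * WATCHDOG_MAX_SKEW floor, so the margin is raised to that
	 * floor (124 us with the default 62 us skew limit).
	 */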
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
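
/*
 * From userspace this is exposed on the standard sysfs path as, e.g.
 * (the clocksource names are illustrative; the available ones are
 * listed by the available_clocksource attribute below):
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	tsc
 *	# echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */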
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of clocksource to be unbound
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
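
/*
 * Usage sketch (illustrative clocksource name): writing a registered
 * clocksource's name unbinds it, provided a replacement can be
 * installed for each role it currently serves:
 *
 *	# echo acpi_pm > /sys/devices/system/clocksource/clocksource0/unbind_clocksource
 *
 * -ENODEV is returned for an unknown name, -EBUSY if no replacement
 * watchdog or current clocksource can be installed.
 */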
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
					  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
					  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static const struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strscpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
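
/*
 * For example, booting with the kernel command line argument
 *
 *	clocksource=acpi_pm
 *
 * pre-loads override_name so that clocksource_select() prefers acpi_pm
 * once it has been registered (assuming it is usable in the current
 * tick mode).
 */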

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);