// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

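/*
 * Comparator for the timer heap: orders timers by expiry so the
 * soonest-expiring timer sits at the root. The signed subtraction gives
 * a time_before()-style comparison that stays correct across
 * wraparound of the clock.
 */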
static inline long io_timer_cmp(io_timer_heap *h,
				struct io_timer *l,
				struct io_timer *r)
{
	return l->expire - r->expire;
}

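/*
 * Arm @timer to fire when the IO clock reaches timer->expire. If that
 * time has already passed, the callback is run immediately (with
 * timer_lock dropped); adding a timer that is already pending is a
 * no-op.
 */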
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	if (time_after_eq((unsigned long) atomic64_read(&clock->now),
			  timer->expire)) {
		spin_unlock(&clock->timer_lock);
		timer->fn(timer);
		return;
	}

	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
out:
	spin_unlock(&clock->timer_lock);
}

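/*
 * Cancel @timer if it is still pending; a no-op if it already fired or
 * was never added.
 */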
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer) {
			heap_del(&clock->timers, i, io_timer_cmp, NULL);
			break;
		}

	spin_unlock(&clock->timer_lock);
}

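/*
 * State for a task sleeping on the IO clock: the io_timer wakes it when
 * the clock has advanced far enough, and an ordinary wall-clock timer
 * provides the fallback timeout.
 */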
struct io_clock_wait {
	struct io_timer		io_timer;
	struct timer_list	cpu_timer;
	struct task_struct	*task;
	int			expired;
};

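/* IO-clock timer callback: mark the wait expired and wake the sleeper. */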
static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, io_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

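/* Wall-clock timer callback: same wakeup, for the timeout path. */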
static void io_clock_cpu_timeout(struct timer_list *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, cpu_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

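/*
 * Sleep until the IO clock reaches @until. Like schedule(), this does
 * not set the task state itself; the caller is expected to have done
 * so.
 */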
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.io_timer.expire	= until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	schedule();

	bch2_io_timer_del(clock, &wait.io_timer);
}

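/*
 * Sleep until the IO clock reaches @io_until or @cpu_timeout jiffies of
 * wall-clock time have passed, whichever comes first; kthreads also
 * wake on kthread_should_stop().
 *
 * Illustrative caller (hypothetical, not from this file): a background
 * thread that wants to run again after ~1024 more sectors of IO, but no
 * later than ten seconds from now:
 *
 *	bch2_kthread_io_clock_wait(&c->io_clock[WRITE],
 *			atomic64_read(&c->io_clock[WRITE].now) + 1024,
 *			10 * HZ);
 */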
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				unsigned long io_until,
				unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait;

	wait.io_timer.expire	= io_until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	del_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}

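/*
 * Pop the soonest-expiring timer off the heap if it is due at @now;
 * returns NULL once no pending timer has expired.
 */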
static struct io_timer *get_expired_timer(struct io_clock *clock,
					  unsigned long now)
{
	struct io_timer *ret = NULL;

	spin_lock(&clock->timer_lock);

	if (clock->timers.used &&
	    time_after_eq(now, clock->timers.data[0]->expire))
		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);

	spin_unlock(&clock->timer_lock);

	return ret;
}

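/*
 * Advance the clock by @sectors and run the callback of every timer the
 * new time has reached. Callbacks run without timer_lock held, so they
 * may safely re-arm themselves.
 */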
void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
{
	struct io_timer *timer;
	unsigned long now = atomic64_add_return(sectors, &clock->now);

	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}

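/* Print each pending timer's callback and sectors until it expires. */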
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
	unsigned long now;
	unsigned i;

	out->atomic++;
	spin_lock(&clock->timer_lock);
	now = atomic64_read(&clock->now);

	for (i = 0; i < clock->timers.used; i++)
		prt_printf(out, "%ps:\t%li\n",
			   clock->timers.data[i]->fn,
			   clock->timers.data[i]->expire - now);
	spin_unlock(&clock->timer_lock);
	--out->atomic;
}

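/* Free everything allocated by bch2_io_clock_init(). */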
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}

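/*
 * Set up the clock: now starts at zero, a bounded heap holds pending
 * timers, and a per-cpu buffer is used to batch clock increments (hence
 * max_slop, the most the clock can lag behind the batched counts).
 */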
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic64_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -BCH_ERR_ENOMEM_io_clock_init;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -BCH_ERR_ENOMEM_io_clock_init;

	return 0;
}

/* source: linux/fs/bcachefs/clock.c */