// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

/*
 * Helpers to control vblanks while we flush.. basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamp
 * on pageflip events (if requested)
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}

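/*
 * The per-crtc commit locks are always taken in ascending crtc-index
 * order (and dropped in reverse), with the crtc index doubling as the
 * lockdep subclass so that nesting them does not trip false positives.
 */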
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	int crtc_index;
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		crtc_index = drm_crtc_index(crtc);
		mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
	}
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
		mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

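	/*
	 * Nothing to do if a later commit has already flushed this crtc:
	 * it clears the pending bit when it folds our flush into its own.
	 */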
	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

static void msm_atomic_pending_work(struct kthread_work *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work.work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;

	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
	if (IS_ERR(timer->worker)) {
		int ret = PTR_ERR(timer->worker);
		timer->worker = NULL;
		return ret;
	}
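	/* Run the flush worker at realtime (FIFO) priority so it can meet the vsync deadline */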
	sched_set_fifo(timer->worker->task);

	msm_hrtimer_work_init(&timer->work, timer->worker,
			msm_atomic_pending_work,
			CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

	return 0;
}

void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
	if (timer->worker)
		kthread_destroy_worker(timer->worker);
}

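/*
 * Decide whether this commit can take the async (timer-deferred flush)
 * path: only cursor/async plane updates that touch a single active crtc,
 * with no connector changes and no modeset, qualify.
 */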
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change, means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		if (!crtc_state->active)
			return false;
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator to iterate over
 * the affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

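	/*
	 * Enabling or disabling the CTM block is not handled as a fast-path
	 * update; promote such commits to a full modeset.
	 */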
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
		    (!old_crtc_state->ctm && new_crtc_state->ctm)) {
			new_crtc_state->mode_changed = true;
			state->allow_modeset = true;
		}
	}

	return drm_atomic_helper_check(dev, state);
}

void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	if (kms->funcs->prepare_commit)
		kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

			if (drm_crtc_next_vblank_start(async_crtc, &vsync_time))
				goto fallback;

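			/*
			 * Wake the flush worker a millisecond before the
			 * expected vsync, so the queued flush lands just
			 * ahead of it.
			 */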
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			msm_hrtimer_queue_work(&timer->work, wakeup_time,
					HRTIMER_MODE_ABS);
		}

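		/*
		 * The actual flush is deferred to the pending-timer worker,
		 * which re-takes the commit lock and calls enable_commit()
		 * itself, so both can be released here.
		 */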
		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);
		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

fallback:
	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}