1 | /* |
2 | * Copyright 2007-8 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: Dave Airlie |
24 | * Alex Deucher |
25 | */ |
26 | |
27 | #include <linux/pci.h> |
28 | #include <linux/pm_runtime.h> |
29 | #include <linux/gcd.h> |
30 | |
31 | #include <asm/div64.h> |
32 | |
33 | #include <drm/drm_crtc_helper.h> |
34 | #include <drm/drm_device.h> |
35 | #include <drm/drm_drv.h> |
36 | #include <drm/drm_edid.h> |
37 | #include <drm/drm_fourcc.h> |
38 | #include <drm/drm_framebuffer.h> |
39 | #include <drm/drm_gem_framebuffer_helper.h> |
40 | #include <drm/drm_modeset_helper.h> |
41 | #include <drm/drm_probe_helper.h> |
42 | #include <drm/drm_vblank.h> |
43 | #include <drm/radeon_drm.h> |
44 | |
45 | #include "atom.h" |
46 | #include "radeon.h" |
47 | #include "radeon_kms.h" |
48 | |
/*
 * avivo_crtc_load_lut - program the gamma LUT on AVIVO display blocks
 *
 * @crtc: drm crtc whose LUT should be (re)loaded
 *
 * Uploads the 256-entry gamma table from crtc->gamma_store into the
 * hardware LUT of this crtc, after resetting the black/white offset
 * registers to their neutral values (0 / 0xffff per channel).
 */
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	/* select this crtc's LUT for register access, then autoincrement
	 * writes starting at index 0 */
	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	/* gamma_store holds 256 red, then 256 green, then 256 blue u16s */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		/* pack the top 10 bits of each 16-bit channel into one
		 * 30-bit word: red 29:20, green 19:10, blue 9:0 */
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}
86 | |
/*
 * dce4_crtc_load_lut - program the gamma LUT on DCE4 (evergreen) asics
 *
 * @crtc: drm crtc whose LUT should be (re)loaded
 *
 * Same scheme as the AVIVO variant, but DCE4 has per-crtc LUT access
 * registers (offset by crtc_offset) instead of a global RW_SELECT.
 */
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	/* autoincrementing writes from entry 0 */
	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	/* gamma_store holds 256 red, then 256 green, then 256 blue u16s */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		/* 10 bits per channel: red 29:20, green 19:10, blue 9:0 */
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}
}
120 | |
/*
 * dce5_crtc_load_lut - program the gamma LUT on DCE5+ (NI and later) asics
 *
 * @crtc: drm crtc whose LUT should be (re)loaded
 *
 * On NI the legacy LUT sits inside a larger color pipeline (input CSC,
 * prescale, degamma, gamut remap, regamma, output CSC).  This routine
 * bypasses every other stage so that only the input-gamma LUT affects
 * the output, then uploads crtc->gamma_store as on DCE4.
 */
static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);

	/* NOTE(review): short settle delay before reprogramming the color
	 * pipeline — presumably needed by the hardware; exact reason not
	 * visible here, TODO confirm */
	msleep(10);

	/* bypass everything except the input gamma LUT */
	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	/* gamma_store holds 256 red, then 256 green, then 256 blue u16s */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		/* 10 bits per channel: red 29:20, green 19:10, blue 9:0 */
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
	if (ASIC_IS_DCE8(rdev)) {
		/* XXX this only needs to be programmed once per crtc at startup,
		 * not sure where the best place for it is
		 */
		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
		       CIK_CURSOR_ALPHA_BLND_ENA);
	}
}
192 | |
/*
 * legacy_crtc_load_lut - program the palette on pre-AVIVO asics
 *
 * @crtc: drm crtc whose palette should be (re)loaded
 *
 * Old radeons have a single DAC palette shared between the two crtcs;
 * RADEON_DAC2_PALETTE_ACC_CTL in DAC_CNTL2 selects which crtc's palette
 * the index/data registers address.
 */
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;
	uint32_t dac2_cntl;

	/* route palette accesses to this crtc */
	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	/* gamma_store holds 256 red, then 256 green, then 256 blue u16s */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		/* 10 bits per channel: red 29:20, green 19:10, blue 9:0 */
		WREG32(RADEON_PALETTE_30_DATA,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}
}
220 | |
221 | void radeon_crtc_load_lut(struct drm_crtc *crtc) |
222 | { |
223 | struct drm_device *dev = crtc->dev; |
224 | struct radeon_device *rdev = dev->dev_private; |
225 | |
226 | if (!crtc->enabled) |
227 | return; |
228 | |
229 | if (ASIC_IS_DCE5(rdev)) |
230 | dce5_crtc_load_lut(crtc); |
231 | else if (ASIC_IS_DCE4(rdev)) |
232 | dce4_crtc_load_lut(crtc); |
233 | else if (ASIC_IS_AVIVO(rdev)) |
234 | avivo_crtc_load_lut(crtc); |
235 | else |
236 | legacy_crtc_load_lut(crtc); |
237 | } |
238 | |
/*
 * radeon_crtc_gamma_set - drm_crtc_funcs.gamma_set callback
 *
 * The @red/@green/@blue arguments are unused: the load_lut helpers read
 * the table from crtc->gamma_store instead (presumably the DRM core has
 * already copied the values there before invoking this callback — TODO
 * confirm against the drm core gamma path).  Always returns 0.
 */
static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t size,
				 struct drm_modeset_acquire_ctx *ctx)
{
	radeon_crtc_load_lut(crtc);

	return 0;
}
247 | |
/*
 * radeon_crtc_destroy - drm_crtc_funcs.destroy callback
 *
 * Unregisters the crtc from the DRM core first, then tears down the
 * per-crtc flip workqueue (flushing any pending flip work) and frees
 * the radeon_crtc allocation made in radeon_crtc_init().
 */
static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(radeon_crtc->flip_queue);
	kfree(radeon_crtc);
}
256 | |
257 | /** |
258 | * radeon_unpin_work_func - unpin old buffer object |
259 | * |
260 | * @__work: kernel work item |
261 | * |
262 | * Unpin the old frame buffer object outside of the interrupt handler |
263 | */ |
264 | static void radeon_unpin_work_func(struct work_struct *__work) |
265 | { |
266 | struct radeon_flip_work *work = |
267 | container_of(__work, struct radeon_flip_work, unpin_work); |
268 | int r; |
269 | |
270 | /* unpin of the old buffer */ |
271 | r = radeon_bo_reserve(bo: work->old_rbo, no_intr: false); |
272 | if (likely(r == 0)) { |
273 | radeon_bo_unpin(bo: work->old_rbo); |
274 | radeon_bo_unreserve(bo: work->old_rbo); |
275 | } else |
276 | DRM_ERROR("failed to reserve buffer after flip\n" ); |
277 | |
278 | drm_gem_object_put(obj: &work->old_rbo->tbo.base); |
279 | kfree(objp: work); |
280 | } |
281 | |
/**
 * radeon_crtc_handle_vblank - poll for pageflip completion from vblank irq
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this vblank is for
 *
 * Called from the vblank interrupt.  On asics without reliable pageflip
 * completion irqs (or when radeon_use_pflipirq < 2), checks whether a
 * submitted pageflip has completed — or is certain to complete within
 * this vblank — and if so finishes it via radeon_crtc_handle_flip().
 */
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	unsigned long flags;
	u32 update_pending;
	int vpos, hpos;

	/* can happen during initialization */
	if (radeon_crtc == NULL)
		return;

	/* Skip the pageflip completion check below (based on polling) on
	 * asics which reliably support hw pageflip completion irqs. pflip
	 * irqs are a reliable and race-free method of handling pageflip
	 * completion detection. A use_pflipirq module parameter < 2 allows
	 * to override this in case of asics with faulty pflip irqs.
	 * A module parameter of 0 would only use this polling based path,
	 * a parameter of 1 would use pflip irq only as a backup to this
	 * path, as in Linux 3.16.
	 */
	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
		return;

	/* event_lock also protects flip_status (see radeon_flip_work_func) */
	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	update_pending = radeon_page_flip_pending(rdev, crtc_id);

	/* Has the pageflip already completed in crtc, or is it certain
	 * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
	 * distance to start of "fudged earlier" vblank in vpos, distance to
	 * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
	 * the last few scanlines before start of real vblank, where the vblank
	 * irq can fire, so we have sampled update_pending a bit too early and
	 * know the flip will complete at leading edge of the upcoming real
	 * vblank. On pre-AVIVO hardware, flips also complete inside the real
	 * vblank, not only at leading edge, so if update_pending for hpos >= 0
	 * == inside real vblank, the flip will complete almost immediately.
	 * Note that this method of completion handling is still not 100% race
	 * free, as we could execute before the radeon_flip_work_func managed
	 * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op,
	 * but the flip still gets programmed into hw and completed during
	 * vblank, leading to a delayed emission of the flip completion event.
	 * This applies at least to pre-AVIVO hardware, where flips are always
	 * completing inside vblank, not only at leading edge of vblank.
	 */
	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID &
	     radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
					GET_DISTANCE_TO_VBLANKSTART,
					&vpos, &hpos, NULL, NULL,
					&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
	    ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
		/* crtc didn't flip in this target vblank interval,
		 * but flip is pending in crtc. Based on the current
		 * scanout position we know that the current frame is
		 * (nearly) complete and the flip will (likely)
		 * complete before the start of the next frame.
		 */
		update_pending = 0;
	}
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
	if (!update_pending)
		radeon_crtc_handle_flip(rdev, crtc_id);
}
354 | |
355 | /** |
356 | * radeon_crtc_handle_flip - page flip completed |
357 | * |
358 | * @rdev: radeon device pointer |
359 | * @crtc_id: crtc number this event is for |
360 | * |
361 | * Called when we are sure that a page flip for this crtc is completed. |
362 | */ |
/**
 * radeon_crtc_handle_flip - page flip completed
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this event is for
 *
 * Called when we are sure that a page flip for this crtc is completed.
 * Sends the userspace completion event, drops the vblank and pflip-irq
 * references taken when the flip was queued, and schedules the unpin of
 * the old buffer on the crtc's flip workqueue.
 */
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct radeon_flip_work *work;
	unsigned long flags;

	/* this can happen at init */
	if (radeon_crtc == NULL)
		return;

	/* event_lock protects flip_status / flip_work */
	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	work = radeon_crtc->flip_work;
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	/* Pageflip completed. Clean up. */
	radeon_crtc->flip_status = RADEON_FLIP_NONE;
	radeon_crtc->flip_work = NULL;

	/* wakeup userspace */
	if (work->event)
		drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);

	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&radeon_crtc->base);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
398 | |
399 | /** |
400 | * radeon_flip_work_func - page flip framebuffer |
401 | * |
402 | * @__work: kernel work item |
403 | * |
404 | * Wait for the buffer object to become idle and do the actual page flip |
405 | */ |
/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @__work: kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip.
 * Runs on the per-crtc flip workqueue; waits for the new buffer's fence,
 * waits for the target vblank window, then programs the flip via mmio.
 */
static void radeon_flip_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, flip_work);
	struct radeon_device *rdev = work->rdev;
	struct drm_device *dev = rdev->ddev;
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &radeon_crtc->base;
	unsigned long flags;
	int r;
	int vpos, hpos;

	down_read(&rdev->exclusive_lock);
	if (work->fence) {
		struct radeon_fence *fence;

		fence = to_radeon_fence(work->fence);
		if (fence && fence->rdev == rdev) {
			r = radeon_fence_wait(fence, false);
			if (r == -EDEADLK) {
				/* fence wait deadlocked: drop the lock and
				 * reset the GPU, retrying until the reset
				 * itself stops asking for a retry */
				up_read(&rdev->exclusive_lock);
				do {
					r = radeon_gpu_reset(rdev);
				} while (r == -EAGAIN);
				down_read(&rdev->exclusive_lock);
			}
		} else
			/* foreign fence (other device/driver): plain wait */
			r = dma_fence_wait(work->fence, false);

		if (r)
			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);

		/* We continue with the page flip even if we failed to wait on
		 * the fence, otherwise the DRM core and userspace will be
		 * confused about which BO the CRTC is scanning out
		 */

		dma_fence_put(work->fence);
		work->fence = NULL;
	}

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip. Always wait on pre DCE4 to avoid races with
	 * flip completion handling from vblank irq, as these old asics don't
	 * have reliable pageflip completion interrupts.
	 */
	while (radeon_crtc->enabled &&
	       (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
					   &vpos, &hpos, NULL, NULL,
					   &crtc->hwmode)
		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	       (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	       (!ASIC_IS_AVIVO(rdev) ||
		((int) (work->target_vblank -
			crtc->funcs->get_vblank_counter(crtc)) > 0)))
		usleep_range(1000, 2000);

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

	/* do the flip (mmio) */
	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);

	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&rdev->exclusive_lock);
}
477 | |
/*
 * radeon_crtc_page_flip_target - drm_crtc_funcs.page_flip_target callback
 *
 * @crtc: crtc to flip
 * @fb: new framebuffer to scan out
 * @event: optional completion event to send to userspace
 * @page_flip_flags: DRM_MODE_PAGE_FLIP_* flags
 * @target: absolute vblank count the flip should hit
 * @ctx: modeset acquire context (unused here)
 *
 * Pins the new framebuffer's BO in VRAM, captures its write fence,
 * computes the crtc base address, and queues radeon_flip_work_func()
 * on the crtc's flip workqueue to perform the flip at the right time.
 * Returns 0 on success, -EBUSY if a flip is already pending, or a
 * negative error code on pin/reserve/fence failure.
 */
static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
					struct drm_framebuffer *fb,
					struct drm_pending_vblank_event *event,
					uint32_t page_flip_flags,
					uint32_t target,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_gem_object *obj;
	struct radeon_flip_work *work;
	struct radeon_bo *new_rbo;
	uint32_t tiling_flags, pitch_pixels;
	uint64_t base;
	unsigned long flags;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, radeon_flip_work_func);
	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);

	work->rdev = rdev;
	work->crtc_id = radeon_crtc->crtc_id;
	work->event = event;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	drm_gem_object_get(obj);
	work->old_rbo = gem_to_radeon_bo(obj);

	obj = fb->obj[0];
	new_rbo = gem_to_radeon_bo(obj);

	/* pin the new buffer */
	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
			 work->old_rbo, new_rbo);

	r = radeon_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(new_rbo);
		r = -EINVAL;
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
	/* remember the last write fence so the flip can wait for rendering */
	r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				   &work->fence);
	if (r) {
		radeon_bo_unreserve(new_rbo);
		DRM_ERROR("failed to get new rbo buffer fences\n");
		goto cleanup;
	}
	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(new_rbo);

	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
		base -= radeon_crtc->legacy_display_base_addr;
		pitch_pixels = fb->pitches[0] / fb->format->cpp[0];

		if (tiling_flags & RADEON_TILING_MACRO) {
			if (ASIC_IS_R300(rdev)) {
				base &= ~0x7ff;
			} else {
				/* account for the crtc x/y offset inside a
				 * macro-tiled surface (2KB tiles) */
				int byteshift = fb->format->cpp[0] * 8 >> 4;
				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
			}
		} else {
			/* linear surface: byte offset of the (x, y) origin */
			int offset = crtc->y * pitch_pixels + crtc->x;
			switch (fb->format->cpp[0] * 8) {
			case 8:
			default:
				offset *= 1;
				break;
			case 15:
			case 16:
				offset *= 2;
				break;
			case 24:
				offset *= 3;
				break;
			case 32:
				offset *= 4;
				break;
			}
			base += offset;
		}
		base &= ~7;
	}
	work->base = base;
	/* translate the absolute target vblank count into the hw counter
	 * domain used by the wait loop in radeon_flip_work_func */
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		crtc->funcs->get_vblank_counter(crtc);

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}
	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
	radeon_crtc->flip_work = work;

	/* update crtc fb */
	crtc->primary->fb = fb;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	queue_work(radeon_crtc->flip_queue, &work->flip_work);
	return 0;

pflip_cleanup:
	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	radeon_bo_unpin(new_rbo);
	radeon_bo_unreserve(new_rbo);

cleanup:
	drm_gem_object_put(&work->old_rbo->tbo.base);
	dma_fence_put(work->fence);
	kfree(work);
	return r;
}
619 | |
/*
 * radeon_crtc_set_config - drm_crtc_funcs.set_config callback
 *
 * @set: requested mode set
 * @ctx: modeset acquire context
 *
 * Wraps drm_crtc_helper_set_config() with runtime-pm bookkeeping: the
 * device holds one long-lived power reference (have_disp_power_ref)
 * while at least one crtc is enabled, and drops it when the last crtc
 * goes away.  The reference taken on entry is either kept (first crtc
 * becoming active) or released before returning.
 */
static int
radeon_crtc_set_config(struct drm_mode_set *set,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	/* wake the device; on failure drop the usage count we just took */
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
667 | |
/* CRTC callbacks shared by all radeon generations; asic differences are
 * handled inside the individual handlers (see radeon_crtc_load_lut). */
static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set2 = radeon_crtc_cursor_set2,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip_target = radeon_crtc_page_flip_target,
	.get_vblank_counter = radeon_get_vblank_counter_kms,
	.enable_vblank = radeon_enable_vblank_kms,
	.disable_vblank = radeon_disable_vblank_kms,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
680 | |
681 | static void radeon_crtc_init(struct drm_device *dev, int index) |
682 | { |
683 | struct radeon_device *rdev = dev->dev_private; |
684 | struct radeon_crtc *radeon_crtc; |
685 | |
686 | radeon_crtc = kzalloc(size: sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); |
687 | if (radeon_crtc == NULL) |
688 | return; |
689 | |
690 | drm_crtc_init(dev, crtc: &radeon_crtc->base, funcs: &radeon_crtc_funcs); |
691 | |
692 | drm_mode_crtc_set_gamma_size(crtc: &radeon_crtc->base, gamma_size: 256); |
693 | radeon_crtc->crtc_id = index; |
694 | radeon_crtc->flip_queue = alloc_workqueue(fmt: "radeon-crtc" , flags: WQ_HIGHPRI, max_active: 0); |
695 | rdev->mode_info.crtcs[index] = radeon_crtc; |
696 | |
697 | if (rdev->family >= CHIP_BONAIRE) { |
698 | radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH; |
699 | radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; |
700 | } else { |
701 | radeon_crtc->max_cursor_width = CURSOR_WIDTH; |
702 | radeon_crtc->max_cursor_height = CURSOR_HEIGHT; |
703 | } |
704 | dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; |
705 | dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; |
706 | |
707 | #if 0 |
708 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
709 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); |
710 | radeon_crtc->mode_set.num_connectors = 0; |
711 | #endif |
712 | |
713 | if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) |
714 | radeon_atombios_init_crtc(dev, radeon_crtc); |
715 | else |
716 | radeon_legacy_init_crtc(dev, radeon_crtc); |
717 | } |
718 | |
/* Human-readable encoder names for radeon_print_display_setup(),
 * indexed by radeon_encoder->encoder_id. */
static const char *encoder_names[38] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
};
759 | |
/* Hot-plug-detect pin names, indexed by radeon_connector->hpd.hpd. */
static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
768 | |
769 | static void radeon_print_display_setup(struct drm_device *dev) |
770 | { |
771 | struct drm_connector *connector; |
772 | struct radeon_connector *radeon_connector; |
773 | struct drm_encoder *encoder; |
774 | struct radeon_encoder *radeon_encoder; |
775 | uint32_t devices; |
776 | int i = 0; |
777 | |
778 | DRM_INFO("Radeon Display Connectors\n" ); |
779 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
780 | radeon_connector = to_radeon_connector(connector); |
781 | DRM_INFO("Connector %d:\n" , i); |
782 | DRM_INFO(" %s\n" , connector->name); |
783 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) |
784 | DRM_INFO(" %s\n" , hpd_names[radeon_connector->hpd.hpd]); |
785 | if (radeon_connector->ddc_bus) { |
786 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n" , |
787 | radeon_connector->ddc_bus->rec.mask_clk_reg, |
788 | radeon_connector->ddc_bus->rec.mask_data_reg, |
789 | radeon_connector->ddc_bus->rec.a_clk_reg, |
790 | radeon_connector->ddc_bus->rec.a_data_reg, |
791 | radeon_connector->ddc_bus->rec.en_clk_reg, |
792 | radeon_connector->ddc_bus->rec.en_data_reg, |
793 | radeon_connector->ddc_bus->rec.y_clk_reg, |
794 | radeon_connector->ddc_bus->rec.y_data_reg); |
795 | if (radeon_connector->router.ddc_valid) |
796 | DRM_INFO(" DDC Router 0x%x/0x%x\n" , |
797 | radeon_connector->router.ddc_mux_control_pin, |
798 | radeon_connector->router.ddc_mux_state); |
799 | if (radeon_connector->router.cd_valid) |
800 | DRM_INFO(" Clock/Data Router 0x%x/0x%x\n" , |
801 | radeon_connector->router.cd_mux_control_pin, |
802 | radeon_connector->router.cd_mux_state); |
803 | } else { |
804 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || |
805 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || |
806 | connector->connector_type == DRM_MODE_CONNECTOR_DVID || |
807 | connector->connector_type == DRM_MODE_CONNECTOR_DVIA || |
808 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || |
809 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) |
810 | DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n" ); |
811 | } |
812 | DRM_INFO(" Encoders:\n" ); |
813 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
814 | radeon_encoder = to_radeon_encoder(encoder); |
815 | devices = radeon_encoder->devices & radeon_connector->devices; |
816 | if (devices) { |
817 | if (devices & ATOM_DEVICE_CRT1_SUPPORT) |
818 | DRM_INFO(" CRT1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
819 | if (devices & ATOM_DEVICE_CRT2_SUPPORT) |
820 | DRM_INFO(" CRT2: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
821 | if (devices & ATOM_DEVICE_LCD1_SUPPORT) |
822 | DRM_INFO(" LCD1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
823 | if (devices & ATOM_DEVICE_DFP1_SUPPORT) |
824 | DRM_INFO(" DFP1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
825 | if (devices & ATOM_DEVICE_DFP2_SUPPORT) |
826 | DRM_INFO(" DFP2: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
827 | if (devices & ATOM_DEVICE_DFP3_SUPPORT) |
828 | DRM_INFO(" DFP3: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
829 | if (devices & ATOM_DEVICE_DFP4_SUPPORT) |
830 | DRM_INFO(" DFP4: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
831 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
832 | DRM_INFO(" DFP5: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
833 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) |
834 | DRM_INFO(" DFP6: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
835 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
836 | DRM_INFO(" TV1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
837 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
838 | DRM_INFO(" CV: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
839 | } |
840 | } |
841 | i++; |
842 | } |
843 | } |
844 | |
845 | static bool radeon_setup_enc_conn(struct drm_device *dev) |
846 | { |
847 | struct radeon_device *rdev = dev->dev_private; |
848 | bool ret = false; |
849 | |
850 | if (rdev->bios) { |
851 | if (rdev->is_atom_bios) { |
852 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); |
853 | if (!ret) |
854 | ret = radeon_get_atom_connector_info_from_object_table(dev); |
855 | } else { |
856 | ret = radeon_get_legacy_connector_info_from_bios(dev); |
857 | if (!ret) |
858 | ret = radeon_get_legacy_connector_info_from_table(dev); |
859 | } |
860 | } else { |
861 | if (!ASIC_IS_AVIVO(rdev)) |
862 | ret = radeon_get_legacy_connector_info_from_table(dev); |
863 | } |
864 | if (ret) { |
865 | radeon_setup_encoder_clones(dev); |
866 | radeon_print_display_setup(dev); |
867 | } |
868 | |
869 | return ret; |
870 | } |
871 | |
872 | /* avivo */ |
873 | |
874 | /** |
875 | * avivo_reduce_ratio - fractional number reduction |
876 | * |
877 | * @nom: nominator |
878 | * @den: denominator |
879 | * @nom_min: minimum value for nominator |
880 | * @den_min: minimum value for denominator |
881 | * |
882 | * Find the greatest common divisor and apply it on both nominator and |
883 | * denominator, but make nominator and denominator are at least as large |
884 | * as their minimum values. |
885 | */ |
886 | static void avivo_reduce_ratio(unsigned *nom, unsigned *den, |
887 | unsigned nom_min, unsigned den_min) |
888 | { |
889 | unsigned tmp; |
890 | |
891 | /* reduce the numbers to a simpler ratio */ |
892 | tmp = gcd(a: *nom, b: *den); |
893 | *nom /= tmp; |
894 | *den /= tmp; |
895 | |
896 | /* make sure nominator is large enough */ |
897 | if (*nom < nom_min) { |
898 | tmp = DIV_ROUND_UP(nom_min, *nom); |
899 | *nom *= tmp; |
900 | *den *= tmp; |
901 | } |
902 | |
903 | /* make sure the denominator is large enough */ |
904 | if (*den < den_min) { |
905 | tmp = DIV_ROUND_UP(den_min, *den); |
906 | *nom *= tmp; |
907 | *den *= tmp; |
908 | } |
909 | } |
910 | |
911 | /** |
912 | * avivo_get_fb_ref_div - feedback and ref divider calculation |
913 | * |
914 | * @nom: nominator |
915 | * @den: denominator |
916 | * @post_div: post divider |
917 | * @fb_div_max: feedback divider maximum |
918 | * @ref_div_max: reference divider maximum |
919 | * @fb_div: resulting feedback divider |
920 | * @ref_div: resulting reference divider |
921 | * |
922 | * Calculate feedback and reference divider for a given post divider. Makes |
923 | * sure we stay within the limits. |
924 | */ |
925 | static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, |
926 | unsigned fb_div_max, unsigned ref_div_max, |
927 | unsigned *fb_div, unsigned *ref_div) |
928 | { |
929 | /* limit reference * post divider to a maximum */ |
930 | ref_div_max = max(min(100 / post_div, ref_div_max), 1u); |
931 | |
932 | /* get matching reference and feedback divider */ |
933 | *ref_div = min(max(den/post_div, 1u), ref_div_max); |
934 | *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); |
935 | |
936 | /* limit fb divider to its maximum */ |
937 | if (*fb_div > fb_div_max) { |
938 | *ref_div = (*ref_div * fb_div_max)/(*fb_div); |
939 | *fb_div = fb_div_max; |
940 | } |
941 | } |
942 | |
943 | /** |
944 | * radeon_compute_pll_avivo - compute PLL paramaters |
945 | * |
946 | * @pll: information about the PLL |
947 | * @freq: target frequency |
948 | * @dot_clock_p: resulting pixel clock |
949 | * @fb_div_p: resulting feedback divider |
950 | * @frac_fb_div_p: fractional part of the feedback divider |
951 | * @ref_div_p: resulting reference divider |
952 | * @post_div_p: resulting reference divider |
953 | * |
954 | * Try to calculate the PLL parameters to generate the given frequency: |
955 | * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div) |
956 | */ |
957 | void radeon_compute_pll_avivo(struct radeon_pll *pll, |
958 | u32 freq, |
959 | u32 *dot_clock_p, |
960 | u32 *fb_div_p, |
961 | u32 *frac_fb_div_p, |
962 | u32 *ref_div_p, |
963 | u32 *post_div_p) |
964 | { |
965 | unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? |
966 | freq : freq / 10; |
967 | |
968 | unsigned fb_div_min, fb_div_max, fb_div; |
969 | unsigned post_div_min, post_div_max, post_div; |
970 | unsigned ref_div_min, ref_div_max, ref_div; |
971 | unsigned post_div_best, diff_best; |
972 | unsigned nom, den; |
973 | |
974 | /* determine allowed feedback divider range */ |
975 | fb_div_min = pll->min_feedback_div; |
976 | fb_div_max = pll->max_feedback_div; |
977 | |
978 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
979 | fb_div_min *= 10; |
980 | fb_div_max *= 10; |
981 | } |
982 | |
983 | /* determine allowed ref divider range */ |
984 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
985 | ref_div_min = pll->reference_div; |
986 | else |
987 | ref_div_min = pll->min_ref_div; |
988 | |
989 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && |
990 | pll->flags & RADEON_PLL_USE_REF_DIV) |
991 | ref_div_max = pll->reference_div; |
992 | else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) |
993 | /* fix for problems on RS880 */ |
994 | ref_div_max = min(pll->max_ref_div, 7u); |
995 | else |
996 | ref_div_max = pll->max_ref_div; |
997 | |
998 | /* determine allowed post divider range */ |
999 | if (pll->flags & RADEON_PLL_USE_POST_DIV) { |
1000 | post_div_min = pll->post_div; |
1001 | post_div_max = pll->post_div; |
1002 | } else { |
1003 | unsigned vco_min, vco_max; |
1004 | |
1005 | if (pll->flags & RADEON_PLL_IS_LCD) { |
1006 | vco_min = pll->lcd_pll_out_min; |
1007 | vco_max = pll->lcd_pll_out_max; |
1008 | } else { |
1009 | vco_min = pll->pll_out_min; |
1010 | vco_max = pll->pll_out_max; |
1011 | } |
1012 | |
1013 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
1014 | vco_min *= 10; |
1015 | vco_max *= 10; |
1016 | } |
1017 | |
1018 | post_div_min = vco_min / target_clock; |
1019 | if ((target_clock * post_div_min) < vco_min) |
1020 | ++post_div_min; |
1021 | if (post_div_min < pll->min_post_div) |
1022 | post_div_min = pll->min_post_div; |
1023 | |
1024 | post_div_max = vco_max / target_clock; |
1025 | if ((target_clock * post_div_max) > vco_max) |
1026 | --post_div_max; |
1027 | if (post_div_max > pll->max_post_div) |
1028 | post_div_max = pll->max_post_div; |
1029 | } |
1030 | |
1031 | /* represent the searched ratio as fractional number */ |
1032 | nom = target_clock; |
1033 | den = pll->reference_freq; |
1034 | |
1035 | /* reduce the numbers to a simpler ratio */ |
1036 | avivo_reduce_ratio(nom: &nom, den: &den, nom_min: fb_div_min, den_min: post_div_min); |
1037 | |
1038 | /* now search for a post divider */ |
1039 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) |
1040 | post_div_best = post_div_min; |
1041 | else |
1042 | post_div_best = post_div_max; |
1043 | diff_best = ~0; |
1044 | |
1045 | for (post_div = post_div_min; post_div <= post_div_max; ++post_div) { |
1046 | unsigned diff; |
1047 | avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, |
1048 | ref_div_max, fb_div: &fb_div, ref_div: &ref_div); |
1049 | diff = abs(target_clock - (pll->reference_freq * fb_div) / |
1050 | (ref_div * post_div)); |
1051 | |
1052 | if (diff < diff_best || (diff == diff_best && |
1053 | !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) { |
1054 | |
1055 | post_div_best = post_div; |
1056 | diff_best = diff; |
1057 | } |
1058 | } |
1059 | post_div = post_div_best; |
1060 | |
1061 | /* get the feedback and reference divider for the optimal value */ |
1062 | avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max, |
1063 | fb_div: &fb_div, ref_div: &ref_div); |
1064 | |
1065 | /* reduce the numbers to a simpler ratio once more */ |
1066 | /* this also makes sure that the reference divider is large enough */ |
1067 | avivo_reduce_ratio(nom: &fb_div, den: &ref_div, nom_min: fb_div_min, den_min: ref_div_min); |
1068 | |
1069 | /* avoid high jitter with small fractional dividers */ |
1070 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { |
1071 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); |
1072 | if (fb_div < fb_div_min) { |
1073 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); |
1074 | fb_div *= tmp; |
1075 | ref_div *= tmp; |
1076 | } |
1077 | } |
1078 | |
1079 | /* and finally save the result */ |
1080 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
1081 | *fb_div_p = fb_div / 10; |
1082 | *frac_fb_div_p = fb_div % 10; |
1083 | } else { |
1084 | *fb_div_p = fb_div; |
1085 | *frac_fb_div_p = 0; |
1086 | } |
1087 | |
1088 | *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) + |
1089 | (pll->reference_freq * *frac_fb_div_p)) / |
1090 | (ref_div * post_div * 10); |
1091 | *ref_div_p = ref_div; |
1092 | *post_div_p = post_div; |
1093 | |
1094 | DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n" , |
1095 | freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, |
1096 | ref_div, post_div); |
1097 | } |
1098 | |
1099 | /* pre-avivo */ |
1100 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) |
1101 | { |
1102 | n += d / 2; |
1103 | |
1104 | do_div(n, d); |
1105 | return n; |
1106 | } |
1107 | |
/**
 * radeon_compute_pll_legacy - compute PLL parameters for pre-AVIVO asics
 *
 * @pll: information about the PLL
 * @freq: target frequency (in 10 kHz units; scaled by 1000 internally)
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: resulting fractional feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Searches post/reference/feedback divider combinations (binary search on
 * the feedback dividers) for the combination whose output frequency is
 * closest to the target, subject to the PLL's flag-driven preferences.
 */
void radeon_compute_pll_legacy(struct radeon_pll *pll,
			       uint64_t freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_post_div = pll->min_post_div;
	uint32_t max_post_div = pll->max_post_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;
	u32 pll_out_min, pll_out_max;

	DRM_DEBUG_KMS("PLL freq %llu %u %u\n" , freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (pll->flags & RADEON_PLL_IS_LCD) {
		pll_out_min = pll->lcd_pll_out_min;
		pll_out_max = pll->lcd_pll_out_max;
	} else {
		pll_out_min = pll->pll_out_min;
		pll_out_max = pll->pll_out_max;
	}

	/* NOTE(review): hard cap on the minimum VCO output — presumably a
	 * workaround for bogus BIOS limits; confirm against commit history. */
	if (pll_out_min > 64800)
		pll_out_min = 64800;

	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		/* binary search for a ref divider that keeps the PLL input
		 * frequency within [pll_in_min, pll_in_max] */
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (pll->flags & RADEON_PLL_USE_POST_DIV)
		min_post_div = max_post_div = pll->post_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		uint32_t ref_div;

		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (pll->flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			/* binary search on the feedback divider; narrowed by
			 * comparing the produced frequency to the target */
			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(n: tmp, d: ref_div);

				/* keep the VCO frequency within range */
				if (vco < pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				/* inner binary search on the fractional part */
				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(n: tmp, d: ref_div * post_div);

					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
						/* overshoot is disqualifying */
						if (freq < current_freq)
							error = 0xffffffff;
						else
							error = freq - current_freq;
					} else
						error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					/* accept when clearly better, or when the error
					 * is within 100 and the VCO is closer to best_vco */
					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     ((best_error > 100 && error < best_error - 100) ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						/* exact hit: first one wins, later ones only
						 * on a flag-driven divider preference */
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n" ,
		      (long long)freq,
		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
		      best_ref_div, best_post_div);

}
1292 | |
/* GEM-backed framebuffer: use the generic GEM fb helpers for both hooks. */
static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
1297 | |
1298 | int |
1299 | radeon_framebuffer_init(struct drm_device *dev, |
1300 | struct drm_framebuffer *fb, |
1301 | const struct drm_mode_fb_cmd2 *mode_cmd, |
1302 | struct drm_gem_object *obj) |
1303 | { |
1304 | int ret; |
1305 | fb->obj[0] = obj; |
1306 | drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); |
1307 | ret = drm_framebuffer_init(dev, fb, funcs: &radeon_fb_funcs); |
1308 | if (ret) { |
1309 | fb->obj[0] = NULL; |
1310 | return ret; |
1311 | } |
1312 | return 0; |
1313 | } |
1314 | |
1315 | static struct drm_framebuffer * |
1316 | radeon_user_framebuffer_create(struct drm_device *dev, |
1317 | struct drm_file *file_priv, |
1318 | const struct drm_mode_fb_cmd2 *mode_cmd) |
1319 | { |
1320 | struct drm_gem_object *obj; |
1321 | struct drm_framebuffer *fb; |
1322 | int ret; |
1323 | |
1324 | obj = drm_gem_object_lookup(filp: file_priv, handle: mode_cmd->handles[0]); |
1325 | if (obj == NULL) { |
1326 | dev_err(dev->dev, "No GEM object associated to handle 0x%08X, " |
1327 | "can't create framebuffer\n" , mode_cmd->handles[0]); |
1328 | return ERR_PTR(error: -ENOENT); |
1329 | } |
1330 | |
1331 | /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ |
1332 | if (obj->import_attach) { |
1333 | DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n" ); |
1334 | drm_gem_object_put(obj); |
1335 | return ERR_PTR(error: -EINVAL); |
1336 | } |
1337 | |
1338 | fb = kzalloc(size: sizeof(*fb), GFP_KERNEL); |
1339 | if (fb == NULL) { |
1340 | drm_gem_object_put(obj); |
1341 | return ERR_PTR(error: -ENOMEM); |
1342 | } |
1343 | |
1344 | ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); |
1345 | if (ret) { |
1346 | kfree(objp: fb); |
1347 | drm_gem_object_put(obj); |
1348 | return ERR_PTR(error: ret); |
1349 | } |
1350 | |
1351 | return fb; |
1352 | } |
1353 | |
/* mode_config hooks: only userspace framebuffer creation is driver-specific */
static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
};
1357 | |
/* value/name tables for the driver-private connector properties created
 * in radeon_modeset_create_props() */

/* who programs the TMDS PLL: the driver or the video BIOS tables */
static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{	{ 0, "driver" },
	{ 1, "bios" },
};

/* selectable analog TV output standards */
static const struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{	{ TV_STD_NTSC, "ntsc" },
	{ TV_STD_PAL, "pal" },
	{ TV_STD_PAL_M, "pal-m" },
	{ TV_STD_PAL_60, "pal-60" },
	{ TV_STD_NTSC_J, "ntsc-j" },
	{ TV_STD_SCART_PAL, "scart-pal" },
	{ TV_STD_PAL_CN, "pal-cn" },
	{ TV_STD_SECAM, "secam" },
};

/* HDMI underscan compensation modes */
static const struct drm_prop_enum_list radeon_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

/* HDMI/DP audio modes */
static const struct drm_prop_enum_list radeon_audio_enum_list[] =
{	{ RADEON_AUDIO_DISABLE, "off" },
	{ RADEON_AUDIO_ENABLE, "on" },
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

/* output color-space conversion modes */
static const struct drm_prop_enum_list radeon_output_csc_enum_list[] =
{	{ RADEON_OUTPUT_CSC_BYPASS, "bypass" },
	{ RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
	{ RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
	{ RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
};
1398 | |
1399 | static int radeon_modeset_create_props(struct radeon_device *rdev) |
1400 | { |
1401 | int sz; |
1402 | |
1403 | if (rdev->is_atom_bios) { |
1404 | rdev->mode_info.coherent_mode_property = |
1405 | drm_property_create_range(dev: rdev->ddev, flags: 0 , name: "coherent" , min: 0, max: 1); |
1406 | if (!rdev->mode_info.coherent_mode_property) |
1407 | return -ENOMEM; |
1408 | } |
1409 | |
1410 | if (!ASIC_IS_AVIVO(rdev)) { |
1411 | sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); |
1412 | rdev->mode_info.tmds_pll_property = |
1413 | drm_property_create_enum(dev: rdev->ddev, flags: 0, |
1414 | name: "tmds_pll" , |
1415 | props: radeon_tmds_pll_enum_list, num_values: sz); |
1416 | } |
1417 | |
1418 | rdev->mode_info.load_detect_property = |
1419 | drm_property_create_range(dev: rdev->ddev, flags: 0, name: "load detection" , min: 0, max: 1); |
1420 | if (!rdev->mode_info.load_detect_property) |
1421 | return -ENOMEM; |
1422 | |
1423 | drm_mode_create_scaling_mode_property(dev: rdev->ddev); |
1424 | |
1425 | sz = ARRAY_SIZE(radeon_tv_std_enum_list); |
1426 | rdev->mode_info.tv_std_property = |
1427 | drm_property_create_enum(dev: rdev->ddev, flags: 0, |
1428 | name: "tv standard" , |
1429 | props: radeon_tv_std_enum_list, num_values: sz); |
1430 | |
1431 | sz = ARRAY_SIZE(radeon_underscan_enum_list); |
1432 | rdev->mode_info.underscan_property = |
1433 | drm_property_create_enum(dev: rdev->ddev, flags: 0, |
1434 | name: "underscan" , |
1435 | props: radeon_underscan_enum_list, num_values: sz); |
1436 | |
1437 | rdev->mode_info.underscan_hborder_property = |
1438 | drm_property_create_range(dev: rdev->ddev, flags: 0, |
1439 | name: "underscan hborder" , min: 0, max: 128); |
1440 | if (!rdev->mode_info.underscan_hborder_property) |
1441 | return -ENOMEM; |
1442 | |
1443 | rdev->mode_info.underscan_vborder_property = |
1444 | drm_property_create_range(dev: rdev->ddev, flags: 0, |
1445 | name: "underscan vborder" , min: 0, max: 128); |
1446 | if (!rdev->mode_info.underscan_vborder_property) |
1447 | return -ENOMEM; |
1448 | |
1449 | sz = ARRAY_SIZE(radeon_audio_enum_list); |
1450 | rdev->mode_info.audio_property = |
1451 | drm_property_create_enum(dev: rdev->ddev, flags: 0, |
1452 | name: "audio" , |
1453 | props: radeon_audio_enum_list, num_values: sz); |
1454 | |
1455 | sz = ARRAY_SIZE(radeon_dither_enum_list); |
1456 | rdev->mode_info.dither_property = |
1457 | drm_property_create_enum(dev: rdev->ddev, flags: 0, |
1458 | name: "dither" , |
1459 | props: radeon_dither_enum_list, num_values: sz); |
1460 | |
1461 | sz = ARRAY_SIZE(radeon_output_csc_enum_list); |
1462 | rdev->mode_info.output_csc_property = |
1463 | drm_property_create_enum(dev: rdev->ddev, flags: 0, |
1464 | name: "output_csc" , |
1465 | props: radeon_output_csc_enum_list, num_values: sz); |
1466 | |
1467 | return 0; |
1468 | } |
1469 | |
1470 | void radeon_update_display_priority(struct radeon_device *rdev) |
1471 | { |
1472 | /* adjustment options for the display watermarks */ |
1473 | if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { |
1474 | /* set display priority to high for r3xx, rv515 chips |
1475 | * this avoids flickering due to underflow to the |
1476 | * display controllers during heavy acceleration. |
1477 | * Don't force high on rs4xx igp chips as it seems to |
1478 | * affect the sound card. See kernel bug 15982. |
1479 | */ |
1480 | if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && |
1481 | !(rdev->flags & RADEON_IS_IGP)) |
1482 | rdev->disp_priority = 2; |
1483 | else |
1484 | rdev->disp_priority = 0; |
1485 | } else |
1486 | rdev->disp_priority = radeon_disp_priority; |
1487 | |
1488 | } |
1489 | |
1490 | /* |
1491 | * Allocate hdmi structs and determine register offsets |
1492 | */ |
1493 | static void radeon_afmt_init(struct radeon_device *rdev) |
1494 | { |
1495 | int i; |
1496 | |
1497 | for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) |
1498 | rdev->mode_info.afmt[i] = NULL; |
1499 | |
1500 | if (ASIC_IS_NODCE(rdev)) { |
1501 | /* nothing to do */ |
1502 | } else if (ASIC_IS_DCE4(rdev)) { |
1503 | static uint32_t eg_offsets[] = { |
1504 | EVERGREEN_CRTC0_REGISTER_OFFSET, |
1505 | EVERGREEN_CRTC1_REGISTER_OFFSET, |
1506 | EVERGREEN_CRTC2_REGISTER_OFFSET, |
1507 | EVERGREEN_CRTC3_REGISTER_OFFSET, |
1508 | EVERGREEN_CRTC4_REGISTER_OFFSET, |
1509 | EVERGREEN_CRTC5_REGISTER_OFFSET, |
1510 | 0x13830 - 0x7030, |
1511 | }; |
1512 | int num_afmt; |
1513 | |
1514 | /* DCE8 has 7 audio blocks tied to DIG encoders */ |
1515 | /* DCE6 has 6 audio blocks tied to DIG encoders */ |
1516 | /* DCE4/5 has 6 audio blocks tied to DIG encoders */ |
1517 | /* DCE4.1 has 2 audio blocks tied to DIG encoders */ |
1518 | if (ASIC_IS_DCE8(rdev)) |
1519 | num_afmt = 7; |
1520 | else if (ASIC_IS_DCE6(rdev)) |
1521 | num_afmt = 6; |
1522 | else if (ASIC_IS_DCE5(rdev)) |
1523 | num_afmt = 6; |
1524 | else if (ASIC_IS_DCE41(rdev)) |
1525 | num_afmt = 2; |
1526 | else /* DCE4 */ |
1527 | num_afmt = 6; |
1528 | |
1529 | BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets)); |
1530 | for (i = 0; i < num_afmt; i++) { |
1531 | rdev->mode_info.afmt[i] = kzalloc(size: sizeof(struct radeon_afmt), GFP_KERNEL); |
1532 | if (rdev->mode_info.afmt[i]) { |
1533 | rdev->mode_info.afmt[i]->offset = eg_offsets[i]; |
1534 | rdev->mode_info.afmt[i]->id = i; |
1535 | } |
1536 | } |
1537 | } else if (ASIC_IS_DCE3(rdev)) { |
1538 | /* DCE3.x has 2 audio blocks tied to DIG encoders */ |
1539 | rdev->mode_info.afmt[0] = kzalloc(size: sizeof(struct radeon_afmt), GFP_KERNEL); |
1540 | if (rdev->mode_info.afmt[0]) { |
1541 | rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0; |
1542 | rdev->mode_info.afmt[0]->id = 0; |
1543 | } |
1544 | rdev->mode_info.afmt[1] = kzalloc(size: sizeof(struct radeon_afmt), GFP_KERNEL); |
1545 | if (rdev->mode_info.afmt[1]) { |
1546 | rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1; |
1547 | rdev->mode_info.afmt[1]->id = 1; |
1548 | } |
1549 | } else if (ASIC_IS_DCE2(rdev)) { |
1550 | /* DCE2 has at least 1 routable audio block */ |
1551 | rdev->mode_info.afmt[0] = kzalloc(size: sizeof(struct radeon_afmt), GFP_KERNEL); |
1552 | if (rdev->mode_info.afmt[0]) { |
1553 | rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0; |
1554 | rdev->mode_info.afmt[0]->id = 0; |
1555 | } |
1556 | /* r6xx has 2 routable audio blocks */ |
1557 | if (rdev->family >= CHIP_R600) { |
1558 | rdev->mode_info.afmt[1] = kzalloc(size: sizeof(struct radeon_afmt), GFP_KERNEL); |
1559 | if (rdev->mode_info.afmt[1]) { |
1560 | rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1; |
1561 | rdev->mode_info.afmt[1]->id = 1; |
1562 | } |
1563 | } |
1564 | } |
1565 | } |
1566 | |
/* Free the per-block audio (afmt) state allocated by radeon_afmt_init(). */
static void radeon_afmt_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
		/* kfree(NULL) is a no-op, so unallocated slots are fine */
		kfree(objp: rdev->mode_info.afmt[i]);
		rdev->mode_info.afmt[i] = NULL;
	}
}
1576 | |
/*
 * One-time KMS initialization: mode_config limits and hooks, driver
 * properties, i2c buses, CRTCs, encoders/connectors, DIG PHYs, hpd,
 * audio (afmt) blocks and output polling.
 *
 * Returns 0 on success or a negative error code from property creation.
 */
int radeon_modeset_init(struct radeon_device *rdev)
{
	int i;
	int ret;

	drm_mode_config_init(dev: rdev->ddev);
	rdev->mode_info.mode_config_initialized = true;

	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;

	/* async flips need the pageflip irq, only present on r6xx+ */
	if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
		rdev->ddev->mode_config.async_page_flip = true;

	/* max framebuffer dimensions grow with the display block generation */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_modifiers_not_supported = true;

	ret = radeon_modeset_create_props(rdev);
	if (ret) {
		return ret;
	}

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(dev: rdev->ddev, index: i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(dev: rdev->ddev);
	if (!ret) {
		/* NOTE(review): ret is 0 (false) here, so the caller sees
		 * success even though connector setup failed — confirm this
		 * is intentional. */
		return ret;
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	/* setup afmt */
	radeon_afmt_init(rdev);

	drm_kms_helper_poll_init(dev: rdev->ddev);

	/* do pm late init */
	/* NOTE(review): the result is discarded and 0 is returned below, so
	 * a late PM init failure is treated as non-fatal — confirm intended. */
	ret = radeon_pm_late_init(rdev);

	return 0;
}
1650 | |
/*
 * Tear down everything set up by radeon_modeset_init().  Safe to call
 * even if modeset init never completed: the bulk of the teardown is
 * gated on mode_config_initialized.
 */
void radeon_modeset_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.mode_config_initialized) {
		drm_kms_helper_poll_fini(dev: rdev->ddev);
		radeon_hpd_fini(rdev);
		/* turn off all displays before releasing the mode state */
		drm_helper_force_disable_all(dev: rdev->ddev);
		radeon_afmt_fini(rdev);
		drm_mode_config_cleanup(dev: rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}

	/* kfree(NULL) is a no-op, so a missing hardcoded EDID is fine */
	kfree(objp: rdev->mode_info.bios_hardcoded_edid);

	/* free i2c buses */
	radeon_i2c_fini(rdev);
}
1667 | |
1668 | static bool is_hdtv_mode(const struct drm_display_mode *mode) |
1669 | { |
1670 | /* try and guess if this is a tv or a monitor */ |
1671 | if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ |
1672 | (mode->vdisplay == 576) || /* 576p */ |
1673 | (mode->vdisplay == 720) || /* 720p */ |
1674 | (mode->vdisplay == 1080)) /* 1080p */ |
1675 | return true; |
1676 | else |
1677 | return false; |
1678 | } |
1679 | |
1680 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
1681 | const struct drm_display_mode *mode, |
1682 | struct drm_display_mode *adjusted_mode) |
1683 | { |
1684 | struct drm_device *dev = crtc->dev; |
1685 | struct radeon_device *rdev = dev->dev_private; |
1686 | struct drm_encoder *encoder; |
1687 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1688 | struct radeon_encoder *radeon_encoder; |
1689 | struct drm_connector *connector; |
1690 | bool first = true; |
1691 | u32 src_v = 1, dst_v = 1; |
1692 | u32 src_h = 1, dst_h = 1; |
1693 | |
1694 | radeon_crtc->h_border = 0; |
1695 | radeon_crtc->v_border = 0; |
1696 | |
1697 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1698 | if (encoder->crtc != crtc) |
1699 | continue; |
1700 | radeon_encoder = to_radeon_encoder(encoder); |
1701 | connector = radeon_get_connector_for_encoder(encoder); |
1702 | |
1703 | if (first) { |
1704 | /* set scaling */ |
1705 | if (radeon_encoder->rmx_type == RMX_OFF) |
1706 | radeon_crtc->rmx_type = RMX_OFF; |
1707 | else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || |
1708 | mode->vdisplay < radeon_encoder->native_mode.vdisplay) |
1709 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; |
1710 | else |
1711 | radeon_crtc->rmx_type = RMX_OFF; |
1712 | /* copy native mode */ |
1713 | memcpy(&radeon_crtc->native_mode, |
1714 | &radeon_encoder->native_mode, |
1715 | sizeof(struct drm_display_mode)); |
1716 | src_v = crtc->mode.vdisplay; |
1717 | dst_v = radeon_crtc->native_mode.vdisplay; |
1718 | src_h = crtc->mode.hdisplay; |
1719 | dst_h = radeon_crtc->native_mode.hdisplay; |
1720 | |
1721 | /* fix up for overscan on hdmi */ |
1722 | if (ASIC_IS_AVIVO(rdev) && |
1723 | (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && |
1724 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || |
1725 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1726 | drm_detect_hdmi_monitor(edid: radeon_connector_edid(connector)) && |
1727 | is_hdtv_mode(mode)))) { |
1728 | if (radeon_encoder->underscan_hborder != 0) |
1729 | radeon_crtc->h_border = radeon_encoder->underscan_hborder; |
1730 | else |
1731 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; |
1732 | if (radeon_encoder->underscan_vborder != 0) |
1733 | radeon_crtc->v_border = radeon_encoder->underscan_vborder; |
1734 | else |
1735 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; |
1736 | radeon_crtc->rmx_type = RMX_FULL; |
1737 | src_v = crtc->mode.vdisplay; |
1738 | dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); |
1739 | src_h = crtc->mode.hdisplay; |
1740 | dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2); |
1741 | } |
1742 | first = false; |
1743 | } else { |
1744 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { |
1745 | /* WARNING: Right now this can't happen but |
1746 | * in the future we need to check that scaling |
1747 | * are consistent across different encoder |
1748 | * (ie all encoder can work with the same |
1749 | * scaling). |
1750 | */ |
1751 | DRM_ERROR("Scaling not consistent across encoder.\n" ); |
1752 | return false; |
1753 | } |
1754 | } |
1755 | } |
1756 | if (radeon_crtc->rmx_type != RMX_OFF) { |
1757 | fixed20_12 a, b; |
1758 | a.full = dfixed_const(src_v); |
1759 | b.full = dfixed_const(dst_v); |
1760 | radeon_crtc->vsc.full = dfixed_div(A: a, B: b); |
1761 | a.full = dfixed_const(src_h); |
1762 | b.full = dfixed_const(dst_h); |
1763 | radeon_crtc->hsc.full = dfixed_div(A: a, B: b); |
1764 | } else { |
1765 | radeon_crtc->vsc.full = dfixed_const(1); |
1766 | radeon_crtc->hsc.full = dfixed_const(1); |
1767 | } |
1768 | return true; |
1769 | } |
1770 | |
1771 | /* |
1772 | * Retrieve current video scanout position of crtc on a given gpu, and |
1773 | * an optional accurate timestamp of when query happened. |
1774 | * |
1775 | * \param dev Device to query. |
1776 | * \param crtc Crtc to query. |
1777 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). |
1778 | * For driver internal use only also supports these flags: |
1779 | * |
1780 | * USE_REAL_VBLANKSTART to use the real start of vblank instead |
1781 | * of a fudged earlier start of vblank. |
1782 | * |
1783 | * GET_DISTANCE_TO_VBLANKSTART to return distance to the |
1784 | * fudged earlier start of vblank in *vpos and the distance |
1785 | * to true start of vblank in *hpos. |
1786 | * |
1787 | * \param *vpos Location where vertical scanout position should be stored. |
1788 | * \param *hpos Location where horizontal scanout position should go. |
1789 | * \param *stime Target location for timestamp taken immediately before |
1790 | * scanout position query. Can be NULL to skip timestamp. |
1791 | * \param *etime Target location for timestamp taken immediately after |
1792 | * scanout position query. Can be NULL to skip timestamp. |
1793 | * |
1794 | * Returns vpos as a positive number while in active scanout area. |
1795 | * Returns vpos as a negative number inside vblank, counting the number |
1796 | * of scanlines to go until end of vblank, e.g., -1 means "one scanline |
1797 | * until start of active scanout / end of vblank." |
1798 | * |
1799 | * \return Flags, or'ed together as follows: |
1800 | * |
1801 | * DRM_SCANOUTPOS_VALID = Query successful. |
1802 | * DRM_SCANOUTPOS_INVBL = Inside vblank. |
1803 | * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of |
1804 | * this flag means that returned position may be offset by a constant but |
1805 | * unknown small number of scanlines wrt. real scanout position. |
1806 | * |
1807 | */ |
int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	/* Read vblank start/end and current scanout position from the
	 * per-generation register blocks.  Each branch sets
	 * DRM_SCANOUTPOS_VALID only when the pipe exists for that asic.
	 */
	if (ASIC_IS_DCE4(rdev)) {
		/* DCE4+ (Evergreen and later): up to 6 crtcs, identical
		 * register layout at per-crtc offsets.
		 */
		if (pipe == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		/* AVIVO (R5xx-R7xx): two crtcs with dedicated registers. */
		if (pipe == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (pipe == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
			       RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			/* bit 0 of the status register is the in-vblank flag */
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
			       RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position.
	 * Hardware packs vpos in the low 16 bits and hpos in the high
	 * 16 bits; 13 bits of each are significant.
	 */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	}
	else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}
1981 | |
1982 | bool |
1983 | radeon_get_crtc_scanout_position(struct drm_crtc *crtc, |
1984 | bool in_vblank_irq, int *vpos, int *hpos, |
1985 | ktime_t *stime, ktime_t *etime, |
1986 | const struct drm_display_mode *mode) |
1987 | { |
1988 | struct drm_device *dev = crtc->dev; |
1989 | unsigned int pipe = crtc->index; |
1990 | |
1991 | return radeon_get_crtc_scanoutpos(dev, pipe, flags: 0, vpos, hpos, |
1992 | stime, etime, mode); |
1993 | } |
1994 | |