1 | /* |
2 | * Copyright 2007-8 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: Dave Airlie |
24 | * Alex Deucher |
25 | */ |
26 | #include <drm/drmP.h> |
27 | #include <drm/radeon_drm.h> |
28 | #include "radeon.h" |
29 | |
30 | #include "atom.h" |
31 | #include <asm/div64.h> |
32 | |
33 | #include <linux/pm_runtime.h> |
34 | #include <drm/drm_crtc_helper.h> |
35 | #include <drm/drm_gem_framebuffer_helper.h> |
36 | #include <drm/drm_fb_helper.h> |
37 | #include <drm/drm_plane_helper.h> |
38 | #include <drm/drm_probe_helper.h> |
39 | #include <drm/drm_edid.h> |
40 | |
41 | #include <linux/gcd.h> |
42 | |
43 | static void avivo_crtc_load_lut(struct drm_crtc *crtc) |
44 | { |
45 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
46 | struct drm_device *dev = crtc->dev; |
47 | struct radeon_device *rdev = dev->dev_private; |
48 | u16 *r, *g, *b; |
49 | int i; |
50 | |
	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
52 | WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0); |
53 | |
54 | WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); |
55 | WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); |
56 | WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); |
57 | |
58 | WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); |
59 | WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); |
60 | WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); |
61 | |
62 | WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id); |
63 | WREG32(AVIVO_DC_LUT_RW_MODE, 0); |
64 | WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f); |
65 | |
66 | WREG8(AVIVO_DC_LUT_RW_INDEX, 0); |
67 | r = crtc->gamma_store; |
68 | g = r + crtc->gamma_size; |
69 | b = g + crtc->gamma_size; |
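	/* Each LUT entry is written as one 10:10:10 packed word: the upper ten
	 * bits of the 16-bit gamma_store values end up in the red (bits 29:20),
	 * green (bits 19:10) and blue (bits 9:0) fields below.
	 */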
70 | for (i = 0; i < 256; i++) { |
71 | WREG32(AVIVO_DC_LUT_30_COLOR, |
72 | ((*r++ & 0xffc0) << 14) | |
73 | ((*g++ & 0xffc0) << 4) | |
74 | (*b++ >> 6)); |
75 | } |
76 | |
77 | /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */ |
78 | WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1); |
79 | } |
80 | |
81 | static void dce4_crtc_load_lut(struct drm_crtc *crtc) |
82 | { |
83 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
84 | struct drm_device *dev = crtc->dev; |
85 | struct radeon_device *rdev = dev->dev_private; |
86 | u16 *r, *g, *b; |
87 | int i; |
88 | |
	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
90 | WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); |
91 | |
92 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); |
93 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); |
94 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); |
95 | |
96 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); |
97 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); |
98 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); |
99 | |
100 | WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); |
101 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); |
102 | |
103 | WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); |
104 | r = crtc->gamma_store; |
105 | g = r + crtc->gamma_size; |
106 | b = g + crtc->gamma_size; |
107 | for (i = 0; i < 256; i++) { |
108 | WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, |
109 | ((*r++ & 0xffc0) << 14) | |
110 | ((*g++ & 0xffc0) << 4) | |
111 | (*b++ >> 6)); |
112 | } |
113 | } |
114 | |
115 | static void dce5_crtc_load_lut(struct drm_crtc *crtc) |
116 | { |
117 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
118 | struct drm_device *dev = crtc->dev; |
119 | struct radeon_device *rdev = dev->dev_private; |
120 | u16 *r, *g, *b; |
121 | int i; |
122 | |
	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
124 | |
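	/* DCE5+ wraps the legacy 256-entry LUT in a larger color pipeline
	 * (input CSC, prescale, degamma, gamut remap, regamma, output CSC).
	 * Everything except the LUT itself and the output CSC selected via
	 * radeon_crtc->output_csc is forced into bypass here, so gamma behaves
	 * the same as on older ASICs.
	 */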
125 | WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, |
126 | (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | |
127 | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); |
128 | WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset, |
129 | NI_GRPH_PRESCALE_BYPASS); |
130 | WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset, |
131 | NI_OVL_PRESCALE_BYPASS); |
132 | WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, |
133 | (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | |
134 | NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); |
135 | |
136 | WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); |
137 | |
138 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); |
139 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); |
140 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); |
141 | |
142 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); |
143 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); |
144 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); |
145 | |
146 | WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); |
147 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); |
148 | |
149 | WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); |
150 | r = crtc->gamma_store; |
151 | g = r + crtc->gamma_size; |
152 | b = g + crtc->gamma_size; |
153 | for (i = 0; i < 256; i++) { |
154 | WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, |
155 | ((*r++ & 0xffc0) << 14) | |
156 | ((*g++ & 0xffc0) << 4) | |
157 | (*b++ >> 6)); |
158 | } |
159 | |
160 | WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset, |
161 | (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | |
162 | NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | |
163 | NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | |
164 | NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); |
165 | WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset, |
166 | (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | |
167 | NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); |
168 | WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset, |
169 | (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | |
170 | NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); |
171 | WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, |
172 | (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) | |
173 | NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); |
174 | /* XXX match this to the depth of the crtc fmt block, move to modeset? */ |
175 | WREG32(0x6940 + radeon_crtc->crtc_offset, 0); |
176 | if (ASIC_IS_DCE8(rdev)) { |
177 | /* XXX this only needs to be programmed once per crtc at startup, |
178 | * not sure where the best place for it is |
179 | */ |
180 | WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset, |
181 | CIK_CURSOR_ALPHA_BLND_ENA); |
182 | } |
183 | } |
184 | |
185 | static void legacy_crtc_load_lut(struct drm_crtc *crtc) |
186 | { |
187 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
188 | struct drm_device *dev = crtc->dev; |
189 | struct radeon_device *rdev = dev->dev_private; |
190 | u16 *r, *g, *b; |
191 | int i; |
192 | uint32_t dac2_cntl; |
193 | |
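	/* DAC2_PALETTE_ACC_CTL steers CPU palette accesses: cleared they go to
	 * the first CRTC's palette RAM, set they go to the second CRTC's.
	 */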
194 | dac2_cntl = RREG32(RADEON_DAC_CNTL2); |
195 | if (radeon_crtc->crtc_id == 0) |
196 | dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL; |
197 | else |
198 | dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL; |
199 | WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
200 | |
201 | WREG8(RADEON_PALETTE_INDEX, 0); |
202 | r = crtc->gamma_store; |
203 | g = r + crtc->gamma_size; |
204 | b = g + crtc->gamma_size; |
205 | for (i = 0; i < 256; i++) { |
206 | WREG32(RADEON_PALETTE_30_DATA, |
207 | ((*r++ & 0xffc0) << 14) | |
208 | ((*g++ & 0xffc0) << 4) | |
209 | (*b++ >> 6)); |
210 | } |
211 | } |
212 | |
213 | void radeon_crtc_load_lut(struct drm_crtc *crtc) |
214 | { |
215 | struct drm_device *dev = crtc->dev; |
216 | struct radeon_device *rdev = dev->dev_private; |
217 | |
218 | if (!crtc->enabled) |
219 | return; |
220 | |
221 | if (ASIC_IS_DCE5(rdev)) |
222 | dce5_crtc_load_lut(crtc); |
223 | else if (ASIC_IS_DCE4(rdev)) |
224 | dce4_crtc_load_lut(crtc); |
225 | else if (ASIC_IS_AVIVO(rdev)) |
226 | avivo_crtc_load_lut(crtc); |
227 | else |
228 | legacy_crtc_load_lut(crtc); |
229 | } |
230 | |
231 | static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
232 | u16 *blue, uint32_t size, |
233 | struct drm_modeset_acquire_ctx *ctx) |
234 | { |
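	/* The DRM core has already copied the new table into crtc->gamma_store,
	 * which is what the per-ASIC load_lut helpers read, so the r/g/b
	 * arguments don't need to be copied again here.
	 */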
235 | radeon_crtc_load_lut(crtc); |
236 | |
237 | return 0; |
238 | } |
239 | |
240 | static void radeon_crtc_destroy(struct drm_crtc *crtc) |
241 | { |
242 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
243 | |
244 | drm_crtc_cleanup(crtc); |
245 | destroy_workqueue(radeon_crtc->flip_queue); |
246 | kfree(radeon_crtc); |
247 | } |
248 | |
/**
 * radeon_unpin_work_func - unpin old buffer object
 *
 * @__work: kernel work item
 *
 * Unpin the old frame buffer object outside of the interrupt handler
 */
256 | static void radeon_unpin_work_func(struct work_struct *__work) |
257 | { |
258 | struct radeon_flip_work *work = |
259 | container_of(__work, struct radeon_flip_work, unpin_work); |
260 | int r; |
261 | |
262 | /* unpin of the old buffer */ |
263 | r = radeon_bo_reserve(work->old_rbo, false); |
264 | if (likely(r == 0)) { |
265 | r = radeon_bo_unpin(work->old_rbo); |
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		radeon_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");
272 | |
273 | drm_gem_object_put_unlocked(&work->old_rbo->gem_base); |
274 | kfree(work); |
275 | } |
276 | |
277 | void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) |
278 | { |
279 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
280 | unsigned long flags; |
281 | u32 update_pending; |
282 | int vpos, hpos; |
283 | |
284 | /* can happen during initialization */ |
285 | if (radeon_crtc == NULL) |
286 | return; |
287 | |
288 | /* Skip the pageflip completion check below (based on polling) on |
289 | * asics which reliably support hw pageflip completion irqs. pflip |
290 | * irqs are a reliable and race-free method of handling pageflip |
291 | * completion detection. A use_pflipirq module parameter < 2 allows |
292 | * to override this in case of asics with faulty pflip irqs. |
293 | * A module parameter of 0 would only use this polling based path, |
294 | * a parameter of 1 would use pflip irq only as a backup to this |
295 | * path, as in Linux 3.16. |
296 | */ |
297 | if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev)) |
298 | return; |
299 | |
300 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); |
301 | if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { |
302 | DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " |
303 | "RADEON_FLIP_SUBMITTED(%d)\n" , |
304 | radeon_crtc->flip_status, |
305 | RADEON_FLIP_SUBMITTED); |
306 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
307 | return; |
308 | } |
309 | |
310 | update_pending = radeon_page_flip_pending(rdev, crtc_id); |
311 | |
312 | /* Has the pageflip already completed in crtc, or is it certain |
313 | * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides |
314 | * distance to start of "fudged earlier" vblank in vpos, distance to |
315 | * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in |
316 | * the last few scanlines before start of real vblank, where the vblank |
317 | * irq can fire, so we have sampled update_pending a bit too early and |
318 | * know the flip will complete at leading edge of the upcoming real |
319 | * vblank. On pre-AVIVO hardware, flips also complete inside the real |
320 | * vblank, not only at leading edge, so if update_pending for hpos >= 0 |
321 | * == inside real vblank, the flip will complete almost immediately. |
322 | * Note that this method of completion handling is still not 100% race |
323 | * free, as we could execute before the radeon_flip_work_func managed |
324 | * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op, |
325 | * but the flip still gets programmed into hw and completed during |
326 | * vblank, leading to a delayed emission of the flip completion event. |
327 | * This applies at least to pre-AVIVO hardware, where flips are always |
328 | * completing inside vblank, not only at leading edge of vblank. |
329 | */ |
330 | if (update_pending && |
331 | (DRM_SCANOUTPOS_VALID & |
332 | radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, |
333 | GET_DISTANCE_TO_VBLANKSTART, |
334 | &vpos, &hpos, NULL, NULL, |
335 | &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && |
336 | ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) { |
337 | /* crtc didn't flip in this target vblank interval, |
338 | * but flip is pending in crtc. Based on the current |
339 | * scanout position we know that the current frame is |
340 | * (nearly) complete and the flip will (likely) |
341 | * complete before the start of the next frame. |
342 | */ |
343 | update_pending = 0; |
344 | } |
345 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
346 | if (!update_pending) |
347 | radeon_crtc_handle_flip(rdev, crtc_id); |
348 | } |
349 | |
350 | /** |
351 | * radeon_crtc_handle_flip - page flip completed |
352 | * |
353 | * @rdev: radeon device pointer |
354 | * @crtc_id: crtc number this event is for |
355 | * |
356 | * Called when we are sure that a page flip for this crtc is completed. |
357 | */ |
358 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) |
359 | { |
360 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
361 | struct radeon_flip_work *work; |
362 | unsigned long flags; |
363 | |
364 | /* this can happen at init */ |
365 | if (radeon_crtc == NULL) |
366 | return; |
367 | |
368 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); |
369 | work = radeon_crtc->flip_work; |
370 | if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { |
371 | DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " |
372 | "RADEON_FLIP_SUBMITTED(%d)\n" , |
373 | radeon_crtc->flip_status, |
374 | RADEON_FLIP_SUBMITTED); |
375 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
376 | return; |
377 | } |
378 | |
379 | /* Pageflip completed. Clean up. */ |
380 | radeon_crtc->flip_status = RADEON_FLIP_NONE; |
381 | radeon_crtc->flip_work = NULL; |
382 | |
383 | /* wakeup userspace */ |
384 | if (work->event) |
385 | drm_crtc_send_vblank_event(&radeon_crtc->base, work->event); |
386 | |
387 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
388 | |
389 | drm_crtc_vblank_put(&radeon_crtc->base); |
390 | radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); |
391 | queue_work(radeon_crtc->flip_queue, &work->unpin_work); |
392 | } |
393 | |
/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @__work: kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip
 */
401 | static void radeon_flip_work_func(struct work_struct *__work) |
402 | { |
403 | struct radeon_flip_work *work = |
404 | container_of(__work, struct radeon_flip_work, flip_work); |
405 | struct radeon_device *rdev = work->rdev; |
406 | struct drm_device *dev = rdev->ddev; |
407 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; |
408 | |
409 | struct drm_crtc *crtc = &radeon_crtc->base; |
410 | unsigned long flags; |
411 | int r; |
412 | int vpos, hpos; |
413 | |
414 | down_read(&rdev->exclusive_lock); |
415 | if (work->fence) { |
416 | struct radeon_fence *fence; |
417 | |
418 | fence = to_radeon_fence(work->fence); |
419 | if (fence && fence->rdev == rdev) { |
420 | r = radeon_fence_wait(fence, false); |
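			/* -EDEADLK from the fence wait means the GPU is locked
			 * up; drop the exclusive lock so the reset path can
			 * take it, reset the GPU and then resume the flip.
			 */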
421 | if (r == -EDEADLK) { |
422 | up_read(&rdev->exclusive_lock); |
423 | do { |
424 | r = radeon_gpu_reset(rdev); |
425 | } while (r == -EAGAIN); |
426 | down_read(&rdev->exclusive_lock); |
427 | } |
428 | } else |
429 | r = dma_fence_wait(work->fence, false); |
430 | |
431 | if (r) |
432 | DRM_ERROR("failed to wait on page flip fence (%d)!\n" , r); |
433 | |
434 | /* We continue with the page flip even if we failed to wait on |
435 | * the fence, otherwise the DRM core and userspace will be |
436 | * confused about which BO the CRTC is scanning out |
437 | */ |
438 | |
439 | dma_fence_put(work->fence); |
440 | work->fence = NULL; |
441 | } |
442 | |
443 | /* Wait until we're out of the vertical blank period before the one |
444 | * targeted by the flip. Always wait on pre DCE4 to avoid races with |
445 | * flip completion handling from vblank irq, as these old asics don't |
446 | * have reliable pageflip completion interrupts. |
447 | */ |
448 | while (radeon_crtc->enabled && |
449 | (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0, |
450 | &vpos, &hpos, NULL, NULL, |
451 | &crtc->hwmode) |
452 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == |
453 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && |
454 | (!ASIC_IS_AVIVO(rdev) || |
455 | ((int) (work->target_vblank - |
456 | dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0))) |
457 | usleep_range(1000, 2000); |
458 | |
459 | /* We borrow the event spin lock for protecting flip_status */ |
460 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
461 | |
462 | /* set the proper interrupt */ |
463 | radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); |
464 | |
465 | /* do the flip (mmio) */ |
466 | radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async); |
467 | |
468 | radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED; |
469 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
470 | up_read(&rdev->exclusive_lock); |
471 | } |
472 | |
473 | static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, |
474 | struct drm_framebuffer *fb, |
475 | struct drm_pending_vblank_event *event, |
476 | uint32_t page_flip_flags, |
477 | uint32_t target, |
478 | struct drm_modeset_acquire_ctx *ctx) |
479 | { |
480 | struct drm_device *dev = crtc->dev; |
481 | struct radeon_device *rdev = dev->dev_private; |
482 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
483 | struct drm_gem_object *obj; |
484 | struct radeon_flip_work *work; |
485 | struct radeon_bo *new_rbo; |
486 | uint32_t tiling_flags, pitch_pixels; |
487 | uint64_t base; |
488 | unsigned long flags; |
489 | int r; |
490 | |
491 | work = kzalloc(sizeof *work, GFP_KERNEL); |
492 | if (work == NULL) |
493 | return -ENOMEM; |
494 | |
495 | INIT_WORK(&work->flip_work, radeon_flip_work_func); |
496 | INIT_WORK(&work->unpin_work, radeon_unpin_work_func); |
497 | |
498 | work->rdev = rdev; |
499 | work->crtc_id = radeon_crtc->crtc_id; |
500 | work->event = event; |
501 | work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; |
502 | |
503 | /* schedule unpin of the old buffer */ |
504 | obj = crtc->primary->fb->obj[0]; |
505 | |
506 | /* take a reference to the old object */ |
507 | drm_gem_object_get(obj); |
508 | work->old_rbo = gem_to_radeon_bo(obj); |
509 | |
510 | obj = fb->obj[0]; |
511 | new_rbo = gem_to_radeon_bo(obj); |
512 | |
513 | /* pin the new buffer */ |
514 | DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n" , |
515 | work->old_rbo, new_rbo); |
516 | |
517 | r = radeon_bo_reserve(new_rbo, false); |
518 | if (unlikely(r != 0)) { |
519 | DRM_ERROR("failed to reserve new rbo buffer before flip\n" ); |
520 | goto cleanup; |
521 | } |
522 | /* Only 27 bit offset for legacy CRTC */ |
523 | r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM, |
524 | ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); |
525 | if (unlikely(r != 0)) { |
526 | radeon_bo_unreserve(new_rbo); |
527 | r = -EINVAL; |
528 | DRM_ERROR("failed to pin new rbo buffer before flip\n" ); |
529 | goto cleanup; |
530 | } |
531 | work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); |
532 | radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); |
533 | radeon_bo_unreserve(new_rbo); |
534 | |
535 | if (!ASIC_IS_AVIVO(rdev)) { |
536 | /* crtc offset is from display base addr not FB location */ |
537 | base -= radeon_crtc->legacy_display_base_addr; |
538 | pitch_pixels = fb->pitches[0] / fb->format->cpp[0]; |
539 | |
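		/* For macro tiled scanout the CRTC base must point at the tile
		 * containing (x, y). On pre-R300 parts the math below assumes
		 * the legacy 2KB macro tiles that are 256 bytes wide and 8
		 * lines tall; R300 and newer only need the base aligned down
		 * to a 2KB tile boundary.
		 */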
540 | if (tiling_flags & RADEON_TILING_MACRO) { |
541 | if (ASIC_IS_R300(rdev)) { |
542 | base &= ~0x7ff; |
543 | } else { |
544 | int byteshift = fb->format->cpp[0] * 8 >> 4; |
545 | int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; |
546 | base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); |
547 | } |
548 | } else { |
549 | int offset = crtc->y * pitch_pixels + crtc->x; |
550 | switch (fb->format->cpp[0] * 8) { |
551 | case 8: |
552 | default: |
553 | offset *= 1; |
554 | break; |
555 | case 15: |
556 | case 16: |
557 | offset *= 2; |
558 | break; |
559 | case 24: |
560 | offset *= 3; |
561 | break; |
562 | case 32: |
563 | offset *= 4; |
564 | break; |
565 | } |
566 | base += offset; |
567 | } |
568 | base &= ~7; |
569 | } |
570 | work->base = base; |
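	/* 'target' is expressed in drm_crtc_vblank_count() terms; translate it
	 * into the raw hardware counter domain, which is what the flip worker
	 * compares against while waiting for the right vblank.
	 */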
571 | work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + |
572 | dev->driver->get_vblank_counter(dev, work->crtc_id); |
573 | |
574 | /* We borrow the event spin lock for protecting flip_work */ |
575 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
576 | |
577 | if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { |
578 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n" ); |
579 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
580 | r = -EBUSY; |
581 | goto pflip_cleanup; |
582 | } |
583 | radeon_crtc->flip_status = RADEON_FLIP_PENDING; |
584 | radeon_crtc->flip_work = work; |
585 | |
586 | /* update crtc fb */ |
587 | crtc->primary->fb = fb; |
588 | |
589 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
590 | |
591 | queue_work(radeon_crtc->flip_queue, &work->flip_work); |
592 | return 0; |
593 | |
594 | pflip_cleanup: |
	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
602 | radeon_bo_unreserve(new_rbo); |
603 | |
604 | cleanup: |
605 | drm_gem_object_put_unlocked(&work->old_rbo->gem_base); |
606 | dma_fence_put(work->fence); |
607 | kfree(work); |
608 | return r; |
609 | } |
610 | |
611 | static int |
612 | radeon_crtc_set_config(struct drm_mode_set *set, |
613 | struct drm_modeset_acquire_ctx *ctx) |
614 | { |
615 | struct drm_device *dev; |
616 | struct radeon_device *rdev; |
617 | struct drm_crtc *crtc; |
618 | bool active = false; |
619 | int ret; |
620 | |
621 | if (!set || !set->crtc) |
622 | return -EINVAL; |
623 | |
624 | dev = set->crtc->dev; |
625 | |
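	/* Take a runtime PM reference for the duration of the modeset; whether
	 * it is kept afterwards depends on whether any CRTC ends up enabled
	 * (see the have_disp_power_ref handling below).
	 */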
626 | ret = pm_runtime_get_sync(dev->dev); |
627 | if (ret < 0) |
628 | return ret; |
629 | |
630 | ret = drm_crtc_helper_set_config(set, ctx); |
631 | |
632 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
633 | if (crtc->enabled) |
634 | active = true; |
635 | |
636 | pm_runtime_mark_last_busy(dev->dev); |
637 | |
638 | rdev = dev->dev_private; |
639 | /* if we have active crtcs and we don't have a power ref, |
640 | take the current one */ |
641 | if (active && !rdev->have_disp_power_ref) { |
642 | rdev->have_disp_power_ref = true; |
643 | return ret; |
644 | } |
645 | /* if we have no active crtcs, then drop the power ref |
646 | we got before */ |
647 | if (!active && rdev->have_disp_power_ref) { |
648 | pm_runtime_put_autosuspend(dev->dev); |
649 | rdev->have_disp_power_ref = false; |
650 | } |
651 | |
652 | /* drop the power reference we got coming in here */ |
653 | pm_runtime_put_autosuspend(dev->dev); |
654 | return ret; |
655 | } |
656 | |
657 | static const struct drm_crtc_funcs radeon_crtc_funcs = { |
658 | .cursor_set2 = radeon_crtc_cursor_set2, |
659 | .cursor_move = radeon_crtc_cursor_move, |
660 | .gamma_set = radeon_crtc_gamma_set, |
661 | .set_config = radeon_crtc_set_config, |
662 | .destroy = radeon_crtc_destroy, |
663 | .page_flip_target = radeon_crtc_page_flip_target, |
664 | }; |
665 | |
666 | static void radeon_crtc_init(struct drm_device *dev, int index) |
667 | { |
668 | struct radeon_device *rdev = dev->dev_private; |
669 | struct radeon_crtc *radeon_crtc; |
670 | int i; |
671 | |
672 | radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); |
673 | if (radeon_crtc == NULL) |
674 | return; |
675 | |
676 | drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); |
677 | |
678 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); |
679 | radeon_crtc->crtc_id = index; |
	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
681 | rdev->mode_info.crtcs[index] = radeon_crtc; |
682 | |
683 | if (rdev->family >= CHIP_BONAIRE) { |
684 | radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH; |
685 | radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; |
686 | } else { |
687 | radeon_crtc->max_cursor_width = CURSOR_WIDTH; |
688 | radeon_crtc->max_cursor_height = CURSOR_HEIGHT; |
689 | } |
690 | dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; |
691 | dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; |
692 | |
693 | #if 0 |
694 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
695 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); |
696 | radeon_crtc->mode_set.num_connectors = 0; |
697 | #endif |
698 | |
699 | for (i = 0; i < 256; i++) { |
700 | radeon_crtc->lut_r[i] = i << 2; |
701 | radeon_crtc->lut_g[i] = i << 2; |
702 | radeon_crtc->lut_b[i] = i << 2; |
703 | } |
704 | |
705 | if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) |
706 | radeon_atombios_init_crtc(dev, radeon_crtc); |
707 | else |
708 | radeon_legacy_init_crtc(dev, radeon_crtc); |
709 | } |
710 | |
711 | static const char *encoder_names[38] = { |
712 | "NONE" , |
713 | "INTERNAL_LVDS" , |
714 | "INTERNAL_TMDS1" , |
715 | "INTERNAL_TMDS2" , |
716 | "INTERNAL_DAC1" , |
717 | "INTERNAL_DAC2" , |
718 | "INTERNAL_SDVOA" , |
719 | "INTERNAL_SDVOB" , |
720 | "SI170B" , |
721 | "CH7303" , |
722 | "CH7301" , |
723 | "INTERNAL_DVO1" , |
724 | "EXTERNAL_SDVOA" , |
725 | "EXTERNAL_SDVOB" , |
726 | "TITFP513" , |
727 | "INTERNAL_LVTM1" , |
728 | "VT1623" , |
729 | "HDMI_SI1930" , |
730 | "HDMI_INTERNAL" , |
731 | "INTERNAL_KLDSCP_TMDS1" , |
732 | "INTERNAL_KLDSCP_DVO1" , |
733 | "INTERNAL_KLDSCP_DAC1" , |
734 | "INTERNAL_KLDSCP_DAC2" , |
735 | "SI178" , |
736 | "MVPU_FPGA" , |
737 | "INTERNAL_DDI" , |
738 | "VT1625" , |
739 | "HDMI_SI1932" , |
740 | "DP_AN9801" , |
741 | "DP_DP501" , |
742 | "INTERNAL_UNIPHY" , |
743 | "INTERNAL_KLDSCP_LVTMA" , |
744 | "INTERNAL_UNIPHY1" , |
745 | "INTERNAL_UNIPHY2" , |
746 | "NUTMEG" , |
747 | "TRAVIS" , |
748 | "INTERNAL_VCE" , |
749 | "INTERNAL_UNIPHY3" , |
750 | }; |
751 | |
752 | static const char *hpd_names[6] = { |
753 | "HPD1" , |
754 | "HPD2" , |
755 | "HPD3" , |
756 | "HPD4" , |
757 | "HPD5" , |
758 | "HPD6" , |
759 | }; |
760 | |
761 | static void radeon_print_display_setup(struct drm_device *dev) |
762 | { |
763 | struct drm_connector *connector; |
764 | struct radeon_connector *radeon_connector; |
765 | struct drm_encoder *encoder; |
766 | struct radeon_encoder *radeon_encoder; |
767 | uint32_t devices; |
768 | int i = 0; |
769 | |
770 | DRM_INFO("Radeon Display Connectors\n" ); |
771 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
772 | radeon_connector = to_radeon_connector(connector); |
773 | DRM_INFO("Connector %d:\n" , i); |
774 | DRM_INFO(" %s\n" , connector->name); |
775 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) |
776 | DRM_INFO(" %s\n" , hpd_names[radeon_connector->hpd.hpd]); |
777 | if (radeon_connector->ddc_bus) { |
778 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n" , |
779 | radeon_connector->ddc_bus->rec.mask_clk_reg, |
780 | radeon_connector->ddc_bus->rec.mask_data_reg, |
781 | radeon_connector->ddc_bus->rec.a_clk_reg, |
782 | radeon_connector->ddc_bus->rec.a_data_reg, |
783 | radeon_connector->ddc_bus->rec.en_clk_reg, |
784 | radeon_connector->ddc_bus->rec.en_data_reg, |
785 | radeon_connector->ddc_bus->rec.y_clk_reg, |
786 | radeon_connector->ddc_bus->rec.y_data_reg); |
787 | if (radeon_connector->router.ddc_valid) |
788 | DRM_INFO(" DDC Router 0x%x/0x%x\n" , |
789 | radeon_connector->router.ddc_mux_control_pin, |
790 | radeon_connector->router.ddc_mux_state); |
791 | if (radeon_connector->router.cd_valid) |
792 | DRM_INFO(" Clock/Data Router 0x%x/0x%x\n" , |
793 | radeon_connector->router.cd_mux_control_pin, |
794 | radeon_connector->router.cd_mux_state); |
795 | } else { |
796 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || |
797 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || |
798 | connector->connector_type == DRM_MODE_CONNECTOR_DVID || |
799 | connector->connector_type == DRM_MODE_CONNECTOR_DVIA || |
800 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || |
801 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) |
802 | DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n" ); |
803 | } |
804 | DRM_INFO(" Encoders:\n" ); |
805 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
806 | radeon_encoder = to_radeon_encoder(encoder); |
807 | devices = radeon_encoder->devices & radeon_connector->devices; |
808 | if (devices) { |
809 | if (devices & ATOM_DEVICE_CRT1_SUPPORT) |
810 | DRM_INFO(" CRT1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
811 | if (devices & ATOM_DEVICE_CRT2_SUPPORT) |
812 | DRM_INFO(" CRT2: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
813 | if (devices & ATOM_DEVICE_LCD1_SUPPORT) |
814 | DRM_INFO(" LCD1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
815 | if (devices & ATOM_DEVICE_DFP1_SUPPORT) |
816 | DRM_INFO(" DFP1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
817 | if (devices & ATOM_DEVICE_DFP2_SUPPORT) |
818 | DRM_INFO(" DFP2: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
819 | if (devices & ATOM_DEVICE_DFP3_SUPPORT) |
820 | DRM_INFO(" DFP3: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
821 | if (devices & ATOM_DEVICE_DFP4_SUPPORT) |
822 | DRM_INFO(" DFP4: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
823 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
824 | DRM_INFO(" DFP5: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
825 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) |
826 | DRM_INFO(" DFP6: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
827 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
828 | DRM_INFO(" TV1: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
829 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
830 | DRM_INFO(" CV: %s\n" , encoder_names[radeon_encoder->encoder_id]); |
831 | } |
832 | } |
833 | i++; |
834 | } |
835 | } |
836 | |
837 | static bool radeon_setup_enc_conn(struct drm_device *dev) |
838 | { |
839 | struct radeon_device *rdev = dev->dev_private; |
840 | bool ret = false; |
841 | |
842 | if (rdev->bios) { |
843 | if (rdev->is_atom_bios) { |
844 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); |
845 | if (ret == false) |
846 | ret = radeon_get_atom_connector_info_from_object_table(dev); |
847 | } else { |
848 | ret = radeon_get_legacy_connector_info_from_bios(dev); |
849 | if (ret == false) |
850 | ret = radeon_get_legacy_connector_info_from_table(dev); |
851 | } |
852 | } else { |
853 | if (!ASIC_IS_AVIVO(rdev)) |
854 | ret = radeon_get_legacy_connector_info_from_table(dev); |
855 | } |
856 | if (ret) { |
857 | radeon_setup_encoder_clones(dev); |
858 | radeon_print_display_setup(dev); |
859 | } |
860 | |
861 | return ret; |
862 | } |
863 | |
864 | /* avivo */ |
865 | |
/**
 * avivo_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for numerator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it to both numerator and
 * denominator, but make sure numerator and denominator are at least as
 * large as their minimum values.
 */
878 | static void avivo_reduce_ratio(unsigned *nom, unsigned *den, |
879 | unsigned nom_min, unsigned den_min) |
880 | { |
881 | unsigned tmp; |
882 | |
883 | /* reduce the numbers to a simpler ratio */ |
884 | tmp = gcd(*nom, *den); |
885 | *nom /= tmp; |
886 | *den /= tmp; |
887 | |
888 | /* make sure nominator is large enough */ |
889 | if (*nom < nom_min) { |
890 | tmp = DIV_ROUND_UP(nom_min, *nom); |
891 | *nom *= tmp; |
892 | *den *= tmp; |
893 | } |
894 | |
895 | /* make sure the denominator is large enough */ |
896 | if (*den < den_min) { |
897 | tmp = DIV_ROUND_UP(den_min, *den); |
898 | *nom *= tmp; |
899 | *den *= tmp; |
900 | } |
901 | } |
902 | |
903 | /** |
904 | * avivo_get_fb_ref_div - feedback and ref divider calculation |
905 | * |
 * @nom: numerator
907 | * @den: denominator |
908 | * @post_div: post divider |
909 | * @fb_div_max: feedback divider maximum |
910 | * @ref_div_max: reference divider maximum |
911 | * @fb_div: resulting feedback divider |
912 | * @ref_div: resulting reference divider |
913 | * |
914 | * Calculate feedback and reference divider for a given post divider. Makes |
915 | * sure we stay within the limits. |
916 | */ |
917 | static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, |
918 | unsigned fb_div_max, unsigned ref_div_max, |
919 | unsigned *fb_div, unsigned *ref_div) |
920 | { |
921 | /* limit reference * post divider to a maximum */ |
922 | ref_div_max = max(min(100 / post_div, ref_div_max), 1u); |
923 | |
924 | /* get matching reference and feedback divider */ |
925 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); |
926 | *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); |
927 | |
928 | /* limit fb divider to its maximum */ |
929 | if (*fb_div > fb_div_max) { |
930 | *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div); |
931 | *fb_div = fb_div_max; |
932 | } |
933 | } |
934 | |
/**
 * radeon_compute_pll_avivo - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
948 | void radeon_compute_pll_avivo(struct radeon_pll *pll, |
949 | u32 freq, |
950 | u32 *dot_clock_p, |
951 | u32 *fb_div_p, |
952 | u32 *frac_fb_div_p, |
953 | u32 *ref_div_p, |
954 | u32 *post_div_p) |
955 | { |
956 | unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? |
957 | freq : freq / 10; |
958 | |
959 | unsigned fb_div_min, fb_div_max, fb_div; |
960 | unsigned post_div_min, post_div_max, post_div; |
961 | unsigned ref_div_min, ref_div_max, ref_div; |
962 | unsigned post_div_best, diff_best; |
963 | unsigned nom, den; |
964 | |
965 | /* determine allowed feedback divider range */ |
966 | fb_div_min = pll->min_feedback_div; |
967 | fb_div_max = pll->max_feedback_div; |
968 | |
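	/* With fractional feedback dividers everything below is scaled by 10:
	 * target_clock keeps an extra digit and fb_div carries the fractional
	 * part in its last decimal digit, which is split off again at the end.
	 */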
969 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
970 | fb_div_min *= 10; |
971 | fb_div_max *= 10; |
972 | } |
973 | |
974 | /* determine allowed ref divider range */ |
975 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
976 | ref_div_min = pll->reference_div; |
977 | else |
978 | ref_div_min = pll->min_ref_div; |
979 | |
980 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && |
981 | pll->flags & RADEON_PLL_USE_REF_DIV) |
982 | ref_div_max = pll->reference_div; |
983 | else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) |
984 | /* fix for problems on RS880 */ |
985 | ref_div_max = min(pll->max_ref_div, 7u); |
986 | else |
987 | ref_div_max = pll->max_ref_div; |
988 | |
989 | /* determine allowed post divider range */ |
990 | if (pll->flags & RADEON_PLL_USE_POST_DIV) { |
991 | post_div_min = pll->post_div; |
992 | post_div_max = pll->post_div; |
993 | } else { |
994 | unsigned vco_min, vco_max; |
995 | |
996 | if (pll->flags & RADEON_PLL_IS_LCD) { |
997 | vco_min = pll->lcd_pll_out_min; |
998 | vco_max = pll->lcd_pll_out_max; |
999 | } else { |
1000 | vco_min = pll->pll_out_min; |
1001 | vco_max = pll->pll_out_max; |
1002 | } |
1003 | |
1004 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
1005 | vco_min *= 10; |
1006 | vco_max *= 10; |
1007 | } |
1008 | |
1009 | post_div_min = vco_min / target_clock; |
1010 | if ((target_clock * post_div_min) < vco_min) |
1011 | ++post_div_min; |
1012 | if (post_div_min < pll->min_post_div) |
1013 | post_div_min = pll->min_post_div; |
1014 | |
1015 | post_div_max = vco_max / target_clock; |
1016 | if ((target_clock * post_div_max) > vco_max) |
1017 | --post_div_max; |
1018 | if (post_div_max > pll->max_post_div) |
1019 | post_div_max = pll->max_post_div; |
1020 | } |
1021 | |
1022 | /* represent the searched ratio as fractional number */ |
1023 | nom = target_clock; |
1024 | den = pll->reference_freq; |
1025 | |
1026 | /* reduce the numbers to a simpler ratio */ |
1027 | avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min); |
1028 | |
1029 | /* now search for a post divider */ |
1030 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) |
1031 | post_div_best = post_div_min; |
1032 | else |
1033 | post_div_best = post_div_max; |
1034 | diff_best = ~0; |
1035 | |
1036 | for (post_div = post_div_min; post_div <= post_div_max; ++post_div) { |
1037 | unsigned diff; |
1038 | avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, |
1039 | ref_div_max, &fb_div, &ref_div); |
1040 | diff = abs(target_clock - (pll->reference_freq * fb_div) / |
1041 | (ref_div * post_div)); |
1042 | |
1043 | if (diff < diff_best || (diff == diff_best && |
1044 | !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) { |
1045 | |
1046 | post_div_best = post_div; |
1047 | diff_best = diff; |
1048 | } |
1049 | } |
1050 | post_div = post_div_best; |
1051 | |
1052 | /* get the feedback and reference divider for the optimal value */ |
1053 | avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max, |
1054 | &fb_div, &ref_div); |
1055 | |
1056 | /* reduce the numbers to a simpler ratio once more */ |
1057 | /* this also makes sure that the reference divider is large enough */ |
1058 | avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); |
1059 | |
1060 | /* avoid high jitter with small fractional dividers */ |
1061 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { |
1062 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); |
1063 | if (fb_div < fb_div_min) { |
1064 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); |
1065 | fb_div *= tmp; |
1066 | ref_div *= tmp; |
1067 | } |
1068 | } |
1069 | |
1070 | /* and finally save the result */ |
1071 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
1072 | *fb_div_p = fb_div / 10; |
1073 | *frac_fb_div_p = fb_div % 10; |
1074 | } else { |
1075 | *fb_div_p = fb_div; |
1076 | *frac_fb_div_p = 0; |
1077 | } |
1078 | |
1079 | *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) + |
1080 | (pll->reference_freq * *frac_fb_div_p)) / |
1081 | (ref_div * post_div * 10); |
1082 | *ref_div_p = ref_div; |
1083 | *post_div_p = post_div; |
1084 | |
1085 | DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n" , |
1086 | freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, |
1087 | ref_div, post_div); |
1088 | } |
1089 | |
1090 | /* pre-avivo */ |
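/* 64-bit division with rounding to nearest: adding d/2 before the divide
 * turns do_div()'s truncation into round-to-nearest.
 */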
1091 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) |
1092 | { |
1093 | uint64_t mod; |
1094 | |
1095 | n += d / 2; |
1096 | |
1097 | mod = do_div(n, d); |
1098 | return n; |
1099 | } |
1100 | |
1101 | void radeon_compute_pll_legacy(struct radeon_pll *pll, |
1102 | uint64_t freq, |
1103 | uint32_t *dot_clock_p, |
1104 | uint32_t *fb_div_p, |
1105 | uint32_t *frac_fb_div_p, |
1106 | uint32_t *ref_div_p, |
1107 | uint32_t *post_div_p) |
1108 | { |
1109 | uint32_t min_ref_div = pll->min_ref_div; |
1110 | uint32_t max_ref_div = pll->max_ref_div; |
1111 | uint32_t min_post_div = pll->min_post_div; |
1112 | uint32_t max_post_div = pll->max_post_div; |
1113 | uint32_t min_fractional_feed_div = 0; |
1114 | uint32_t max_fractional_feed_div = 0; |
1115 | uint32_t best_vco = pll->best_vco; |
1116 | uint32_t best_post_div = 1; |
1117 | uint32_t best_ref_div = 1; |
1118 | uint32_t best_feedback_div = 1; |
1119 | uint32_t best_frac_feedback_div = 0; |
1120 | uint32_t best_freq = -1; |
1121 | uint32_t best_error = 0xffffffff; |
1122 | uint32_t best_vco_diff = 1; |
1123 | uint32_t post_div; |
1124 | u32 pll_out_min, pll_out_max; |
1125 | |
1126 | DRM_DEBUG_KMS("PLL freq %llu %u %u\n" , freq, pll->min_ref_div, pll->max_ref_div); |
1127 | freq = freq * 1000; |
1128 | |
1129 | if (pll->flags & RADEON_PLL_IS_LCD) { |
1130 | pll_out_min = pll->lcd_pll_out_min; |
1131 | pll_out_max = pll->lcd_pll_out_max; |
1132 | } else { |
1133 | pll_out_min = pll->pll_out_min; |
1134 | pll_out_max = pll->pll_out_max; |
1135 | } |
1136 | |
1137 | if (pll_out_min > 64800) |
1138 | pll_out_min = 64800; |
1139 | |
1140 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
1141 | min_ref_div = max_ref_div = pll->reference_div; |
1142 | else { |
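		/* binary search for a reference divider that keeps the PLL
		 * input frequency inside [pll_in_min, pll_in_max]
		 */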
1143 | while (min_ref_div < max_ref_div-1) { |
1144 | uint32_t mid = (min_ref_div + max_ref_div) / 2; |
1145 | uint32_t pll_in = pll->reference_freq / mid; |
1146 | if (pll_in < pll->pll_in_min) |
1147 | max_ref_div = mid; |
1148 | else if (pll_in > pll->pll_in_max) |
1149 | min_ref_div = mid; |
1150 | else |
1151 | break; |
1152 | } |
1153 | } |
1154 | |
1155 | if (pll->flags & RADEON_PLL_USE_POST_DIV) |
1156 | min_post_div = max_post_div = pll->post_div; |
1157 | |
1158 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
1159 | min_fractional_feed_div = pll->min_frac_feedback_div; |
1160 | max_fractional_feed_div = pll->max_frac_feedback_div; |
1161 | } |
1162 | |
1163 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { |
1164 | uint32_t ref_div; |
1165 | |
1166 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
1167 | continue; |
1168 | |
1169 | /* legacy radeons only have a few post_divs */ |
1170 | if (pll->flags & RADEON_PLL_LEGACY) { |
1171 | if ((post_div == 5) || |
1172 | (post_div == 7) || |
1173 | (post_div == 9) || |
1174 | (post_div == 10) || |
1175 | (post_div == 11) || |
1176 | (post_div == 13) || |
1177 | (post_div == 14) || |
1178 | (post_div == 15)) |
1179 | continue; |
1180 | } |
1181 | |
1182 | for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) { |
1183 | uint32_t feedback_div, current_freq = 0, error, vco_diff; |
1184 | uint32_t pll_in = pll->reference_freq / ref_div; |
1185 | uint32_t min_feed_div = pll->min_feedback_div; |
1186 | uint32_t max_feed_div = pll->max_feedback_div + 1; |
1187 | |
1188 | if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max) |
1189 | continue; |
1190 | |
1191 | while (min_feed_div < max_feed_div) { |
1192 | uint32_t vco; |
1193 | uint32_t min_frac_feed_div = min_fractional_feed_div; |
1194 | uint32_t max_frac_feed_div = max_fractional_feed_div + 1; |
1195 | uint32_t frac_feedback_div; |
1196 | uint64_t tmp; |
1197 | |
1198 | feedback_div = (min_feed_div + max_feed_div) / 2; |
1199 | |
1200 | tmp = (uint64_t)pll->reference_freq * feedback_div; |
1201 | vco = radeon_div(tmp, ref_div); |
1202 | |
1203 | if (vco < pll_out_min) { |
1204 | min_feed_div = feedback_div + 1; |
1205 | continue; |
1206 | } else if (vco > pll_out_max) { |
1207 | max_feed_div = feedback_div; |
1208 | continue; |
1209 | } |
1210 | |
1211 | while (min_frac_feed_div < max_frac_feed_div) { |
1212 | frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2; |
1213 | tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div; |
1214 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
1215 | current_freq = radeon_div(tmp, ref_div * post_div); |
1216 | |
1217 | if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
1218 | if (freq < current_freq) |
1219 | error = 0xffffffff; |
1220 | else |
1221 | error = freq - current_freq; |
1222 | } else |
1223 | error = abs(current_freq - freq); |
1224 | vco_diff = abs(vco - best_vco); |
1225 | |
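					/* Accept a new solution only if it is
					 * clearly better (error improves by
					 * more than 100) or, when the errors
					 * are comparable, if its VCO is closer
					 * to the requested best_vco.
					 */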
1226 | if ((best_vco == 0 && error < best_error) || |
1227 | (best_vco != 0 && |
1228 | ((best_error > 100 && error < best_error - 100) || |
1229 | (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { |
1230 | best_post_div = post_div; |
1231 | best_ref_div = ref_div; |
1232 | best_feedback_div = feedback_div; |
1233 | best_frac_feedback_div = frac_feedback_div; |
1234 | best_freq = current_freq; |
1235 | best_error = error; |
1236 | best_vco_diff = vco_diff; |
1237 | } else if (current_freq == freq) { |
1238 | if (best_freq == -1) { |
1239 | best_post_div = post_div; |
1240 | best_ref_div = ref_div; |
1241 | best_feedback_div = feedback_div; |
1242 | best_frac_feedback_div = frac_feedback_div; |
1243 | best_freq = current_freq; |
1244 | best_error = error; |
1245 | best_vco_diff = vco_diff; |
1246 | } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || |
1247 | ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || |
1248 | ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || |
1249 | ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || |
1250 | ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || |
1251 | ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { |
1252 | best_post_div = post_div; |
1253 | best_ref_div = ref_div; |
1254 | best_feedback_div = feedback_div; |
1255 | best_frac_feedback_div = frac_feedback_div; |
1256 | best_freq = current_freq; |
1257 | best_error = error; |
1258 | best_vco_diff = vco_diff; |
1259 | } |
1260 | } |
1261 | if (current_freq < freq) |
1262 | min_frac_feed_div = frac_feedback_div + 1; |
1263 | else |
1264 | max_frac_feed_div = frac_feedback_div; |
1265 | } |
1266 | if (current_freq < freq) |
1267 | min_feed_div = feedback_div + 1; |
1268 | else |
1269 | max_feed_div = feedback_div; |
1270 | } |
1271 | } |
1272 | } |
1273 | |
1274 | *dot_clock_p = best_freq / 10000; |
1275 | *fb_div_p = best_feedback_div; |
1276 | *frac_fb_div_p = best_frac_feedback_div; |
1277 | *ref_div_p = best_ref_div; |
1278 | *post_div_p = best_post_div; |
1279 | DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n" , |
1280 | (long long)freq, |
1281 | best_freq / 1000, best_feedback_div, best_frac_feedback_div, |
1282 | best_ref_div, best_post_div); |
1283 | |
1284 | } |
1285 | |
1286 | static const struct drm_framebuffer_funcs radeon_fb_funcs = { |
1287 | .destroy = drm_gem_fb_destroy, |
1288 | .create_handle = drm_gem_fb_create_handle, |
1289 | }; |
1290 | |
1291 | int |
1292 | radeon_framebuffer_init(struct drm_device *dev, |
1293 | struct drm_framebuffer *fb, |
1294 | const struct drm_mode_fb_cmd2 *mode_cmd, |
1295 | struct drm_gem_object *obj) |
1296 | { |
1297 | int ret; |
1298 | fb->obj[0] = obj; |
1299 | drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); |
1300 | ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); |
1301 | if (ret) { |
1302 | fb->obj[0] = NULL; |
1303 | return ret; |
1304 | } |
1305 | return 0; |
1306 | } |
1307 | |
1308 | static struct drm_framebuffer * |
1309 | radeon_user_framebuffer_create(struct drm_device *dev, |
1310 | struct drm_file *file_priv, |
1311 | const struct drm_mode_fb_cmd2 *mode_cmd) |
1312 | { |
1313 | struct drm_gem_object *obj; |
1314 | struct drm_framebuffer *fb; |
1315 | int ret; |
1316 | |
1317 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); |
1318 | if (obj == NULL) { |
1319 | dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " |
1320 | "can't create framebuffer\n" , mode_cmd->handles[0]); |
1321 | return ERR_PTR(-ENOENT); |
1322 | } |
1323 | |
1324 | /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ |
1325 | if (obj->import_attach) { |
1326 | DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n" ); |
1327 | return ERR_PTR(-EINVAL); |
1328 | } |
1329 | |
1330 | fb = kzalloc(sizeof(*fb), GFP_KERNEL); |
1331 | if (fb == NULL) { |
1332 | drm_gem_object_put_unlocked(obj); |
1333 | return ERR_PTR(-ENOMEM); |
1334 | } |
1335 | |
1336 | ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); |
1337 | if (ret) { |
1338 | kfree(fb); |
1339 | drm_gem_object_put_unlocked(obj); |
1340 | return ERR_PTR(ret); |
1341 | } |
1342 | |
1343 | return fb; |
1344 | } |
1345 | |
1346 | static const struct drm_mode_config_funcs radeon_mode_funcs = { |
1347 | .fb_create = radeon_user_framebuffer_create, |
1348 | .output_poll_changed = drm_fb_helper_output_poll_changed, |
1349 | }; |
1350 | |
1351 | static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = |
1352 | { { 0, "driver" }, |
1353 | { 1, "bios" }, |
1354 | }; |
1355 | |
1356 | static const struct drm_prop_enum_list radeon_tv_std_enum_list[] = |
1357 | { { TV_STD_NTSC, "ntsc" }, |
1358 | { TV_STD_PAL, "pal" }, |
1359 | { TV_STD_PAL_M, "pal-m" }, |
1360 | { TV_STD_PAL_60, "pal-60" }, |
1361 | { TV_STD_NTSC_J, "ntsc-j" }, |
1362 | { TV_STD_SCART_PAL, "scart-pal" }, |
1363 | { TV_STD_PAL_CN, "pal-cn" }, |
1364 | { TV_STD_SECAM, "secam" }, |
1365 | }; |
1366 | |
1367 | static const struct drm_prop_enum_list radeon_underscan_enum_list[] = |
1368 | { { UNDERSCAN_OFF, "off" }, |
1369 | { UNDERSCAN_ON, "on" }, |
1370 | { UNDERSCAN_AUTO, "auto" }, |
1371 | }; |
1372 | |
1373 | static const struct drm_prop_enum_list radeon_audio_enum_list[] = |
1374 | { { RADEON_AUDIO_DISABLE, "off" }, |
1375 | { RADEON_AUDIO_ENABLE, "on" }, |
1376 | { RADEON_AUDIO_AUTO, "auto" }, |
1377 | }; |
1378 | |
1379 | /* XXX support different dither options? spatial, temporal, both, etc. */ |
1380 | static const struct drm_prop_enum_list radeon_dither_enum_list[] = |
1381 | { { RADEON_FMT_DITHER_DISABLE, "off" }, |
1382 | { RADEON_FMT_DITHER_ENABLE, "on" }, |
1383 | }; |
1384 | |
1385 | static const struct drm_prop_enum_list radeon_output_csc_enum_list[] = |
1386 | { { RADEON_OUTPUT_CSC_BYPASS, "bypass" }, |
1387 | { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" }, |
1388 | { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" }, |
1389 | { RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" }, |
1390 | }; |
1391 | |
1392 | static int radeon_modeset_create_props(struct radeon_device *rdev) |
1393 | { |
1394 | int sz; |
1395 | |
1396 | if (rdev->is_atom_bios) { |
1397 | rdev->mode_info.coherent_mode_property = |
			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
1399 | if (!rdev->mode_info.coherent_mode_property) |
1400 | return -ENOMEM; |
1401 | } |
1402 | |
1403 | if (!ASIC_IS_AVIVO(rdev)) { |
1404 | sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); |
1405 | rdev->mode_info.tmds_pll_property = |
1406 | drm_property_create_enum(rdev->ddev, 0, |
1407 | "tmds_pll" , |
1408 | radeon_tmds_pll_enum_list, sz); |
1409 | } |
1410 | |
1411 | rdev->mode_info.load_detect_property = |
		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
1413 | if (!rdev->mode_info.load_detect_property) |
1414 | return -ENOMEM; |
1415 | |
1416 | drm_mode_create_scaling_mode_property(rdev->ddev); |
1417 | |
1418 | sz = ARRAY_SIZE(radeon_tv_std_enum_list); |
1419 | rdev->mode_info.tv_std_property = |
1420 | drm_property_create_enum(rdev->ddev, 0, |
1421 | "tv standard" , |
1422 | radeon_tv_std_enum_list, sz); |
1423 | |
1424 | sz = ARRAY_SIZE(radeon_underscan_enum_list); |
1425 | rdev->mode_info.underscan_property = |
1426 | drm_property_create_enum(rdev->ddev, 0, |
1427 | "underscan" , |
1428 | radeon_underscan_enum_list, sz); |
1429 | |
1430 | rdev->mode_info.underscan_hborder_property = |
1431 | drm_property_create_range(rdev->ddev, 0, |
1432 | "underscan hborder" , 0, 128); |
1433 | if (!rdev->mode_info.underscan_hborder_property) |
1434 | return -ENOMEM; |
1435 | |
1436 | rdev->mode_info.underscan_vborder_property = |
1437 | drm_property_create_range(rdev->ddev, 0, |
1438 | "underscan vborder" , 0, 128); |
1439 | if (!rdev->mode_info.underscan_vborder_property) |
1440 | return -ENOMEM; |
1441 | |
1442 | sz = ARRAY_SIZE(radeon_audio_enum_list); |
1443 | rdev->mode_info.audio_property = |
1444 | drm_property_create_enum(rdev->ddev, 0, |
1445 | "audio" , |
1446 | radeon_audio_enum_list, sz); |
1447 | |
1448 | sz = ARRAY_SIZE(radeon_dither_enum_list); |
1449 | rdev->mode_info.dither_property = |
1450 | drm_property_create_enum(rdev->ddev, 0, |
1451 | "dither" , |
1452 | radeon_dither_enum_list, sz); |
1453 | |
1454 | sz = ARRAY_SIZE(radeon_output_csc_enum_list); |
1455 | rdev->mode_info.output_csc_property = |
1456 | drm_property_create_enum(rdev->ddev, 0, |
1457 | "output_csc" , |
1458 | radeon_output_csc_enum_list, sz); |
1459 | |
1460 | return 0; |
1461 | } |
1462 | |
1463 | void radeon_update_display_priority(struct radeon_device *rdev) |
1464 | { |
1465 | /* adjustment options for the display watermarks */ |
1466 | if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { |
1467 | /* set display priority to high for r3xx, rv515 chips |
1468 | * this avoids flickering due to underflow to the |
1469 | * display controllers during heavy acceleration. |
1470 | * Don't force high on rs4xx igp chips as it seems to |
1471 | * affect the sound card. See kernel bug 15982. |
1472 | */ |
1473 | if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && |
1474 | !(rdev->flags & RADEON_IS_IGP)) |
1475 | rdev->disp_priority = 2; |
1476 | else |
1477 | rdev->disp_priority = 0; |
1478 | } else |
1479 | rdev->disp_priority = radeon_disp_priority; |
1480 | |
1481 | } |
1482 | |
1483 | /* |
1484 | * Allocate hdmi structs and determine register offsets |
1485 | */ |
1486 | static void radeon_afmt_init(struct radeon_device *rdev) |
1487 | { |
1488 | int i; |
1489 | |
1490 | for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) |
1491 | rdev->mode_info.afmt[i] = NULL; |
1492 | |
1493 | if (ASIC_IS_NODCE(rdev)) { |
1494 | /* nothing to do */ |
1495 | } else if (ASIC_IS_DCE4(rdev)) { |
1496 | static uint32_t eg_offsets[] = { |
1497 | EVERGREEN_CRTC0_REGISTER_OFFSET, |
1498 | EVERGREEN_CRTC1_REGISTER_OFFSET, |
1499 | EVERGREEN_CRTC2_REGISTER_OFFSET, |
1500 | EVERGREEN_CRTC3_REGISTER_OFFSET, |
1501 | EVERGREEN_CRTC4_REGISTER_OFFSET, |
1502 | EVERGREEN_CRTC5_REGISTER_OFFSET, |
1503 | 0x13830 - 0x7030, |
1504 | }; |
1505 | int num_afmt; |
1506 | |
1507 | /* DCE8 has 7 audio blocks tied to DIG encoders */ |
1508 | /* DCE6 has 6 audio blocks tied to DIG encoders */ |
1509 | /* DCE4/5 has 6 audio blocks tied to DIG encoders */ |
1510 | /* DCE4.1 has 2 audio blocks tied to DIG encoders */ |
1511 | if (ASIC_IS_DCE8(rdev)) |
1512 | num_afmt = 7; |
1513 | else if (ASIC_IS_DCE6(rdev)) |
1514 | num_afmt = 6; |
1515 | else if (ASIC_IS_DCE5(rdev)) |
1516 | num_afmt = 6; |
1517 | else if (ASIC_IS_DCE41(rdev)) |
1518 | num_afmt = 2; |
1519 | else /* DCE4 */ |
1520 | num_afmt = 6; |
1521 | |
1522 | BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets)); |
1523 | for (i = 0; i < num_afmt; i++) { |
1524 | rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); |
1525 | if (rdev->mode_info.afmt[i]) { |
1526 | rdev->mode_info.afmt[i]->offset = eg_offsets[i]; |
1527 | rdev->mode_info.afmt[i]->id = i; |
1528 | } |
1529 | } |
1530 | } else if (ASIC_IS_DCE3(rdev)) { |
1531 | /* DCE3.x has 2 audio blocks tied to DIG encoders */ |
1532 | rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); |
1533 | if (rdev->mode_info.afmt[0]) { |
1534 | rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0; |
1535 | rdev->mode_info.afmt[0]->id = 0; |
1536 | } |
1537 | rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); |
1538 | if (rdev->mode_info.afmt[1]) { |
1539 | rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1; |
1540 | rdev->mode_info.afmt[1]->id = 1; |
1541 | } |
1542 | } else if (ASIC_IS_DCE2(rdev)) { |
1543 | /* DCE2 has at least 1 routable audio block */ |
1544 | rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); |
1545 | if (rdev->mode_info.afmt[0]) { |
1546 | rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0; |
1547 | rdev->mode_info.afmt[0]->id = 0; |
1548 | } |
1549 | /* r6xx has 2 routable audio blocks */ |
1550 | if (rdev->family >= CHIP_R600) { |
1551 | rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); |
1552 | if (rdev->mode_info.afmt[1]) { |
1553 | rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1; |
1554 | rdev->mode_info.afmt[1]->id = 1; |
1555 | } |
1556 | } |
1557 | } |
1558 | } |
1559 | |
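/*
 * Free the afmt structs allocated in radeon_afmt_init()
 */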
1560 | static void radeon_afmt_fini(struct radeon_device *rdev) |
1561 | { |
1562 | int i; |
1563 | |
1564 | for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) { |
1565 | kfree(rdev->mode_info.afmt[i]); |
1566 | rdev->mode_info.afmt[i] = NULL; |
1567 | } |
1568 | } |
1569 | |
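/*
 * One-time KMS setup: initialize the mode config, create driver
 * properties, i2c buses, CRTCs, encoders/connectors, HPD, audio
 * (afmt) blocks and the fbdev emulation, then start output polling.
 */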
1570 | int radeon_modeset_init(struct radeon_device *rdev) |
1571 | { |
1572 | int i; |
1573 | int ret; |
1574 | |
1575 | drm_mode_config_init(rdev->ddev); |
1576 | rdev->mode_info.mode_config_initialized = true; |
1577 | |
1578 | rdev->ddev->mode_config.funcs = &radeon_mode_funcs; |
1579 | |
1580 | if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600) |
1581 | rdev->ddev->mode_config.async_page_flip = true; |
1582 | |
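	/* Maximum mode/framebuffer dimensions depend on the display engine generation. */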
1583 | if (ASIC_IS_DCE5(rdev)) { |
1584 | rdev->ddev->mode_config.max_width = 16384; |
1585 | rdev->ddev->mode_config.max_height = 16384; |
1586 | } else if (ASIC_IS_AVIVO(rdev)) { |
1587 | rdev->ddev->mode_config.max_width = 8192; |
1588 | rdev->ddev->mode_config.max_height = 8192; |
1589 | } else { |
1590 | rdev->ddev->mode_config.max_width = 4096; |
1591 | rdev->ddev->mode_config.max_height = 4096; |
1592 | } |
1593 | |
1594 | rdev->ddev->mode_config.preferred_depth = 24; |
1595 | rdev->ddev->mode_config.prefer_shadow = 1; |
1596 | |
1597 | rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; |
1598 | |
1599 | ret = radeon_modeset_create_props(rdev); |
1600 | if (ret) { |
1601 | return ret; |
1602 | } |
1603 | |
1604 | /* init i2c buses */ |
1605 | radeon_i2c_init(rdev); |
1606 | |
1607 | /* check combios for a valid hardcoded EDID - Sun servers */ |
1608 | if (!rdev->is_atom_bios) { |
1609 | /* check for hardcoded EDID in BIOS */ |
1610 | radeon_combios_check_hardcoded_edid(rdev); |
1611 | } |
1612 | |
1613 | /* allocate crtcs */ |
1614 | for (i = 0; i < rdev->num_crtc; i++) { |
1615 | radeon_crtc_init(rdev->ddev, i); |
1616 | } |
1617 | |
1618 | /* okay we should have all the bios connectors */ |
1619 | ret = radeon_setup_enc_conn(rdev->ddev); |
1620 | if (!ret) { |
1621 | return ret; |
1622 | } |
1623 | |
1624 | /* init dig PHYs, disp eng pll */ |
1625 | if (rdev->is_atom_bios) { |
1626 | radeon_atom_encoder_init(rdev); |
1627 | radeon_atom_disp_eng_pll_init(rdev); |
1628 | } |
1629 | |
1630 | /* initialize hpd */ |
1631 | radeon_hpd_init(rdev); |
1632 | |
1633 | /* setup afmt */ |
1634 | radeon_afmt_init(rdev); |
1635 | |
1636 | radeon_fbdev_init(rdev); |
1637 | drm_kms_helper_poll_init(rdev->ddev); |
1638 | |
1639 | /* do pm late init */ |
1640 | ret = radeon_pm_late_init(rdev); |
1641 | |
1642 | return 0; |
1643 | } |
1644 | |
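/*
 * Tear down everything set up by radeon_modeset_init(), in roughly
 * reverse order.
 */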
1645 | void radeon_modeset_fini(struct radeon_device *rdev) |
1646 | { |
1647 | if (rdev->mode_info.mode_config_initialized) { |
1648 | drm_kms_helper_poll_fini(rdev->ddev); |
1649 | radeon_hpd_fini(rdev); |
1650 | drm_helper_force_disable_all(rdev->ddev); |
1651 | radeon_fbdev_fini(rdev); |
1652 | radeon_afmt_fini(rdev); |
1653 | drm_mode_config_cleanup(rdev->ddev); |
1654 | rdev->mode_info.mode_config_initialized = false; |
1655 | } |
1656 | |
1657 | kfree(rdev->mode_info.bios_hardcoded_edid); |
1658 | |
1659 | /* free i2c buses */ |
1660 | radeon_i2c_fini(rdev); |
1661 | } |
1662 | |
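/*
 * Heuristic check for standard HDTV timings; used by
 * radeon_crtc_scaling_mode_fixup() when deciding whether to apply
 * automatic HDMI underscan.
 */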
1663 | static bool is_hdtv_mode(const struct drm_display_mode *mode) |
1664 | { |
1665 | /* try and guess if this is a tv or a monitor */ |
1666 | if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ |
1667 | (mode->vdisplay == 576) || /* 576p */ |
1668 | (mode->vdisplay == 720) || /* 720p */ |
1669 | (mode->vdisplay == 1080)) /* 1080p */ |
1670 | return true; |
1671 | else |
1672 | return false; |
1673 | } |
1674 | |
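/*
 * Work out the RMX (scaler) configuration and any underscan borders for
 * this crtc from the encoders attached to it, and store the resulting
 * horizontal/vertical scaling ratios in the radeon_crtc.
 */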
1675 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
1676 | const struct drm_display_mode *mode, |
1677 | struct drm_display_mode *adjusted_mode) |
1678 | { |
1679 | struct drm_device *dev = crtc->dev; |
1680 | struct radeon_device *rdev = dev->dev_private; |
1681 | struct drm_encoder *encoder; |
1682 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1683 | struct radeon_encoder *radeon_encoder; |
1684 | struct drm_connector *connector; |
1685 | struct radeon_connector *radeon_connector; |
1686 | bool first = true; |
1687 | u32 src_v = 1, dst_v = 1; |
1688 | u32 src_h = 1, dst_h = 1; |
1689 | |
1690 | radeon_crtc->h_border = 0; |
1691 | radeon_crtc->v_border = 0; |
1692 | |
1693 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1694 | if (encoder->crtc != crtc) |
1695 | continue; |
1696 | radeon_encoder = to_radeon_encoder(encoder); |
1697 | connector = radeon_get_connector_for_encoder(encoder); |
1698 | radeon_connector = to_radeon_connector(connector); |
1699 | |
1700 | if (first) { |
1701 | /* set scaling */ |
1702 | if (radeon_encoder->rmx_type == RMX_OFF) |
1703 | radeon_crtc->rmx_type = RMX_OFF; |
1704 | else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || |
1705 | mode->vdisplay < radeon_encoder->native_mode.vdisplay) |
1706 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; |
1707 | else |
1708 | radeon_crtc->rmx_type = RMX_OFF; |
1709 | /* copy native mode */ |
1710 | memcpy(&radeon_crtc->native_mode, |
1711 | &radeon_encoder->native_mode, |
1712 | sizeof(struct drm_display_mode)); |
1713 | src_v = crtc->mode.vdisplay; |
1714 | dst_v = radeon_crtc->native_mode.vdisplay; |
1715 | src_h = crtc->mode.hdisplay; |
1716 | dst_h = radeon_crtc->native_mode.hdisplay; |
1717 | |
1718 | /* fix up for overscan on hdmi */ |
1719 | if (ASIC_IS_AVIVO(rdev) && |
1720 | (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && |
1721 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || |
1722 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1723 | drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && |
1724 | is_hdtv_mode(mode)))) { |
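				/* Use explicitly configured underscan borders
				 * if set, otherwise default to hdisplay/32 + 16
				 * and vdisplay/32 + 16 pixels per side.
				 */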
1725 | if (radeon_encoder->underscan_hborder != 0) |
1726 | radeon_crtc->h_border = radeon_encoder->underscan_hborder; |
1727 | else |
1728 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; |
1729 | if (radeon_encoder->underscan_vborder != 0) |
1730 | radeon_crtc->v_border = radeon_encoder->underscan_vborder; |
1731 | else |
1732 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; |
1733 | radeon_crtc->rmx_type = RMX_FULL; |
1734 | src_v = crtc->mode.vdisplay; |
1735 | dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); |
1736 | src_h = crtc->mode.hdisplay; |
1737 | dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2); |
1738 | } |
1739 | first = false; |
1740 | } else { |
1741 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { |
				/* WARNING: Right now this can't happen but
				 * in the future we need to check that scaling
				 * is consistent across the different encoders
				 * (i.e. all encoders can work with the same
				 * scaling).
				 */
				DRM_ERROR("Scaling not consistent across encoders.\n");
1749 | return false; |
1750 | } |
1751 | } |
1752 | } |
1753 | if (radeon_crtc->rmx_type != RMX_OFF) { |
1754 | fixed20_12 a, b; |
1755 | a.full = dfixed_const(src_v); |
1756 | b.full = dfixed_const(dst_v); |
1757 | radeon_crtc->vsc.full = dfixed_div(a, b); |
1758 | a.full = dfixed_const(src_h); |
1759 | b.full = dfixed_const(dst_h); |
1760 | radeon_crtc->hsc.full = dfixed_div(a, b); |
1761 | } else { |
1762 | radeon_crtc->vsc.full = dfixed_const(1); |
1763 | radeon_crtc->hsc.full = dfixed_const(1); |
1764 | } |
1765 | return true; |
1766 | } |
1767 | |
1768 | /* |
1769 | * Retrieve current video scanout position of crtc on a given gpu, and |
1770 | * an optional accurate timestamp of when query happened. |
1771 | * |
1772 | * \param dev Device to query. |
 * \param pipe Crtc (pipe index) to query.
1774 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). |
1775 | * For driver internal use only also supports these flags: |
1776 | * |
1777 | * USE_REAL_VBLANKSTART to use the real start of vblank instead |
1778 | * of a fudged earlier start of vblank. |
1779 | * |
1780 | * GET_DISTANCE_TO_VBLANKSTART to return distance to the |
1781 | * fudged earlier start of vblank in *vpos and the distance |
1782 | * to true start of vblank in *hpos. |
1783 | * |
1784 | * \param *vpos Location where vertical scanout position should be stored. |
1785 | * \param *hpos Location where horizontal scanout position should go. |
1786 | * \param *stime Target location for timestamp taken immediately before |
1787 | * scanout position query. Can be NULL to skip timestamp. |
 * \param *etime Target location for timestamp taken immediately after
 * scanout position query. Can be NULL to skip timestamp.
 * \param mode Hardware mode of the crtc, used for its vertical timing
 * values (crtc_vdisplay, crtc_vtotal).
1790 | * |
1791 | * Returns vpos as a positive number while in active scanout area. |
1792 | * Returns vpos as a negative number inside vblank, counting the number |
1793 | * of scanlines to go until end of vblank, e.g., -1 means "one scanline |
1794 | * until start of active scanout / end of vblank." |
1795 | * |
1796 | * \return Flags, or'ed together as follows: |
1797 | * |
1798 | * DRM_SCANOUTPOS_VALID = Query successful. |
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that the returned position may be offset by a constant
 * but unknown small number of scanlines w.r.t. the real scanout position.
1803 | * |
1804 | */ |
1805 | int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, |
1806 | unsigned int flags, int *vpos, int *hpos, |
1807 | ktime_t *stime, ktime_t *etime, |
1808 | const struct drm_display_mode *mode) |
1809 | { |
1810 | u32 stat_crtc = 0, vbl = 0, position = 0; |
1811 | int vbl_start, vbl_end, vtotal, ret = 0; |
1812 | bool in_vbl = true; |
1813 | |
1814 | struct radeon_device *rdev = dev->dev_private; |
1815 | |
1816 | /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ |
1817 | |
1818 | /* Get optional system timestamp before query. */ |
1819 | if (stime) |
1820 | *stime = ktime_get(); |
1821 | |
1822 | if (ASIC_IS_DCE4(rdev)) { |
1823 | if (pipe == 0) { |
1824 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1825 | EVERGREEN_CRTC0_REGISTER_OFFSET); |
1826 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1827 | EVERGREEN_CRTC0_REGISTER_OFFSET); |
1828 | ret |= DRM_SCANOUTPOS_VALID; |
1829 | } |
1830 | if (pipe == 1) { |
1831 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1832 | EVERGREEN_CRTC1_REGISTER_OFFSET); |
1833 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1834 | EVERGREEN_CRTC1_REGISTER_OFFSET); |
1835 | ret |= DRM_SCANOUTPOS_VALID; |
1836 | } |
1837 | if (pipe == 2) { |
1838 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1839 | EVERGREEN_CRTC2_REGISTER_OFFSET); |
1840 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1841 | EVERGREEN_CRTC2_REGISTER_OFFSET); |
1842 | ret |= DRM_SCANOUTPOS_VALID; |
1843 | } |
1844 | if (pipe == 3) { |
1845 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1846 | EVERGREEN_CRTC3_REGISTER_OFFSET); |
1847 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1848 | EVERGREEN_CRTC3_REGISTER_OFFSET); |
1849 | ret |= DRM_SCANOUTPOS_VALID; |
1850 | } |
1851 | if (pipe == 4) { |
1852 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1853 | EVERGREEN_CRTC4_REGISTER_OFFSET); |
1854 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1855 | EVERGREEN_CRTC4_REGISTER_OFFSET); |
1856 | ret |= DRM_SCANOUTPOS_VALID; |
1857 | } |
1858 | if (pipe == 5) { |
1859 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1860 | EVERGREEN_CRTC5_REGISTER_OFFSET); |
1861 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1862 | EVERGREEN_CRTC5_REGISTER_OFFSET); |
1863 | ret |= DRM_SCANOUTPOS_VALID; |
1864 | } |
1865 | } else if (ASIC_IS_AVIVO(rdev)) { |
1866 | if (pipe == 0) { |
1867 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); |
1868 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); |
1869 | ret |= DRM_SCANOUTPOS_VALID; |
1870 | } |
1871 | if (pipe == 1) { |
1872 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); |
1873 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); |
1874 | ret |= DRM_SCANOUTPOS_VALID; |
1875 | } |
1876 | } else { |
1877 | /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ |
1878 | if (pipe == 0) { |
1879 | /* Assume vbl_end == 0, get vbl_start from |
1880 | * upper 16 bits. |
1881 | */ |
1882 | vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & |
1883 | RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; |
1884 | /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ |
1885 | position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1886 | stat_crtc = RREG32(RADEON_CRTC_STATUS); |
1887 | if (!(stat_crtc & 1)) |
1888 | in_vbl = false; |
1889 | |
1890 | ret |= DRM_SCANOUTPOS_VALID; |
1891 | } |
1892 | if (pipe == 1) { |
1893 | vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & |
1894 | RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; |
1895 | position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1896 | stat_crtc = RREG32(RADEON_CRTC2_STATUS); |
1897 | if (!(stat_crtc & 1)) |
1898 | in_vbl = false; |
1899 | |
1900 | ret |= DRM_SCANOUTPOS_VALID; |
1901 | } |
1902 | } |
1903 | |
1904 | /* Get optional system timestamp after query. */ |
1905 | if (etime) |
1906 | *etime = ktime_get(); |
1907 | |
1908 | /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ |
1909 | |
1910 | /* Decode into vertical and horizontal scanout position. */ |
1911 | *vpos = position & 0x1fff; |
1912 | *hpos = (position >> 16) & 0x1fff; |
1913 | |
1914 | /* Valid vblank area boundaries from gpu retrieved? */ |
1915 | if (vbl > 0) { |
1916 | /* Yes: Decode. */ |
1917 | ret |= DRM_SCANOUTPOS_ACCURATE; |
1918 | vbl_start = vbl & 0x1fff; |
1919 | vbl_end = (vbl >> 16) & 0x1fff; |
1920 | } |
1921 | else { |
1922 | /* No: Fake something reasonable which gives at least ok results. */ |
1923 | vbl_start = mode->crtc_vdisplay; |
1924 | vbl_end = 0; |
1925 | } |
1926 | |
1927 | /* Called from driver internal vblank counter query code? */ |
1928 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { |
1929 | /* Caller wants distance from real vbl_start in *hpos */ |
1930 | *hpos = *vpos - vbl_start; |
1931 | } |
1932 | |
1933 | /* Fudge vblank to start a few scanlines earlier to handle the |
1934 | * problem that vblank irqs fire a few scanlines before start |
1935 | * of vblank. Some driver internal callers need the true vblank |
1936 | * start to be used and signal this via the USE_REAL_VBLANKSTART flag. |
1937 | * |
1938 | * The cause of the "early" vblank irq is that the irq is triggered |
1939 | * by the line buffer logic when the line buffer read position enters |
1940 | * the vblank, whereas our crtc scanout position naturally lags the |
1941 | * line buffer read position. |
1942 | */ |
1943 | if (!(flags & USE_REAL_VBLANKSTART)) |
1944 | vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; |
1945 | |
1946 | /* Test scanout position against vblank region. */ |
1947 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) |
1948 | in_vbl = false; |
1949 | |
1950 | /* In vblank? */ |
1951 | if (in_vbl) |
1952 | ret |= DRM_SCANOUTPOS_IN_VBLANK; |
1953 | |
1954 | /* Called from driver internal vblank counter query code? */ |
1955 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { |
1956 | /* Caller wants distance from fudged earlier vbl_start */ |
1957 | *vpos -= vbl_start; |
1958 | return ret; |
1959 | } |
1960 | |
1961 | /* Check if inside vblank area and apply corrective offsets: |
1962 | * vpos will then be >=0 in video scanout area, but negative |
1963 | * within vblank area, counting down the number of lines until |
1964 | * start of scanout. |
1965 | */ |
1966 | |
1967 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ |
1968 | if (in_vbl && (*vpos >= vbl_start)) { |
1969 | vtotal = mode->crtc_vtotal; |
1970 | *vpos = *vpos - vtotal; |
1971 | } |
1972 | |
1973 | /* Correct for shifted end of vbl at vbl_end. */ |
1974 | *vpos = *vpos - vbl_end; |
1975 | |
1976 | return ret; |
1977 | } |
1978 | |