1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | |
29 | #include <linux/pci.h> |
30 | #include <linux/pm_runtime.h> |
31 | #include <linux/slab.h> |
32 | #include <linux/uaccess.h> |
33 | #include <linux/vga_switcheroo.h> |
34 | |
35 | #include <drm/drm_file.h> |
36 | #include <drm/drm_ioctl.h> |
37 | #include <drm/radeon_drm.h> |
38 | |
39 | #include "radeon.h" |
40 | #include "radeon_asic.h" |
41 | #include "radeon_drv.h" |
42 | #include "radeon_kms.h" |
43 | |
/*
 * ATPX is the ACPI method used on PowerXpress/hybrid-graphics laptops.
 * With VGA_SWITCHEROO enabled the real implementation (elsewhere in the
 * driver) is used; otherwise report that no ATPX support is present.
 */
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
static inline bool radeon_has_atpx(void) { return false; }
#endif
49 | |
50 | /** |
51 | * radeon_driver_unload_kms - Main unload function for KMS. |
52 | * |
53 | * @dev: drm dev pointer |
54 | * |
55 | * This is the main unload function for KMS (all asics). |
56 | * It calls radeon_modeset_fini() to tear down the |
57 | * displays, and radeon_device_fini() to tear down |
58 | * the rest of the device (CP, writeback, etc.). |
59 | * Returns 0 on success. |
60 | */ |
61 | void radeon_driver_unload_kms(struct drm_device *dev) |
62 | { |
63 | struct radeon_device *rdev = dev->dev_private; |
64 | |
65 | if (rdev == NULL) |
66 | return; |
67 | |
68 | if (rdev->rmmio == NULL) |
69 | goto done_free; |
70 | |
71 | if (radeon_is_px(dev)) { |
72 | pm_runtime_get_sync(dev: dev->dev); |
73 | pm_runtime_forbid(dev: dev->dev); |
74 | } |
75 | |
76 | radeon_acpi_fini(rdev); |
77 | |
78 | radeon_modeset_fini(rdev); |
79 | radeon_device_fini(rdev); |
80 | |
81 | if (rdev->agp) |
82 | arch_phys_wc_del(handle: rdev->agp->agp_mtrr); |
83 | kfree(objp: rdev->agp); |
84 | rdev->agp = NULL; |
85 | |
86 | done_free: |
87 | kfree(objp: rdev); |
88 | dev->dev_private = NULL; |
89 | } |
90 | |
91 | /** |
92 | * radeon_driver_load_kms - Main load function for KMS. |
93 | * |
94 | * @dev: drm dev pointer |
95 | * @flags: device flags |
96 | * |
97 | * This is the main load function for KMS (all asics). |
98 | * It calls radeon_device_init() to set up the non-display |
99 | * parts of the chip (asic init, CP, writeback, etc.), and |
100 | * radeon_modeset_init() to set up the display parts |
101 | * (crtcs, encoders, hotplug detect, etc.). |
102 | * Returns 0 on success, error on failure. |
103 | */ |
104 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) |
105 | { |
106 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
107 | struct radeon_device *rdev; |
108 | int r, acpi_status; |
109 | |
110 | rdev = kzalloc(size: sizeof(struct radeon_device), GFP_KERNEL); |
111 | if (rdev == NULL) { |
112 | return -ENOMEM; |
113 | } |
114 | dev->dev_private = (void *)rdev; |
115 | |
116 | #ifdef __alpha__ |
117 | rdev->hose = pdev->sysdata; |
118 | #endif |
119 | |
120 | if (pci_find_capability(dev: pdev, PCI_CAP_ID_AGP)) |
121 | rdev->agp = radeon_agp_head_init(dev); |
122 | if (rdev->agp) { |
123 | rdev->agp->agp_mtrr = arch_phys_wc_add( |
124 | base: rdev->agp->agp_info.aper_base, |
125 | size: rdev->agp->agp_info.aper_size * |
126 | 1024 * 1024); |
127 | } |
128 | |
129 | /* update BUS flag */ |
130 | if (pci_find_capability(dev: pdev, PCI_CAP_ID_AGP)) { |
131 | flags |= RADEON_IS_AGP; |
132 | } else if (pci_is_pcie(dev: pdev)) { |
133 | flags |= RADEON_IS_PCIE; |
134 | } else { |
135 | flags |= RADEON_IS_PCI; |
136 | } |
137 | |
138 | if ((radeon_runtime_pm != 0) && |
139 | radeon_has_atpx() && |
140 | ((flags & RADEON_IS_IGP) == 0) && |
141 | !pci_is_thunderbolt_attached(pdev)) |
142 | flags |= RADEON_IS_PX; |
143 | |
144 | /* radeon_device_init should report only fatal error |
145 | * like memory allocation failure or iomapping failure, |
146 | * or memory manager initialization failure, it must |
147 | * properly initialize the GPU MC controller and permit |
148 | * VRAM allocation |
149 | */ |
150 | r = radeon_device_init(rdev, ddev: dev, pdev, flags); |
151 | if (r) { |
152 | dev_err(dev->dev, "Fatal error during GPU init\n" ); |
153 | goto out; |
154 | } |
155 | |
156 | /* Again modeset_init should fail only on fatal error |
157 | * otherwise it should provide enough functionalities |
158 | * for shadowfb to run |
159 | */ |
160 | r = radeon_modeset_init(rdev); |
161 | if (r) |
162 | dev_err(dev->dev, "Fatal error during modeset init\n" ); |
163 | |
164 | /* Call ACPI methods: require modeset init |
165 | * but failure is not fatal |
166 | */ |
167 | if (!r) { |
168 | acpi_status = radeon_acpi_init(rdev); |
169 | if (acpi_status) |
170 | dev_dbg(dev->dev, "Error during ACPI methods call\n" ); |
171 | } |
172 | |
173 | if (radeon_is_px(dev)) { |
174 | dev_pm_set_driver_flags(dev: dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); |
175 | pm_runtime_use_autosuspend(dev: dev->dev); |
176 | pm_runtime_set_autosuspend_delay(dev: dev->dev, delay: 5000); |
177 | pm_runtime_set_active(dev: dev->dev); |
178 | pm_runtime_allow(dev: dev->dev); |
179 | pm_runtime_mark_last_busy(dev: dev->dev); |
180 | pm_runtime_put_autosuspend(dev: dev->dev); |
181 | } |
182 | |
183 | out: |
184 | if (r) |
185 | radeon_driver_unload_kms(dev); |
186 | |
187 | |
188 | return r; |
189 | } |
190 | |
191 | /** |
192 | * radeon_set_filp_rights - Set filp right. |
193 | * |
194 | * @dev: drm dev pointer |
195 | * @owner: drm file |
196 | * @applier: drm file |
197 | * @value: value |
198 | * |
199 | * Sets the filp rights for the device (all asics). |
200 | */ |
201 | static void radeon_set_filp_rights(struct drm_device *dev, |
202 | struct drm_file **owner, |
203 | struct drm_file *applier, |
204 | uint32_t *value) |
205 | { |
206 | struct radeon_device *rdev = dev->dev_private; |
207 | |
208 | mutex_lock(&rdev->gem.mutex); |
209 | if (*value == 1) { |
210 | /* wants rights */ |
211 | if (!*owner) |
212 | *owner = applier; |
213 | } else if (*value == 0) { |
214 | /* revokes rights */ |
215 | if (*owner == applier) |
216 | *owner = NULL; |
217 | } |
218 | *value = *owner == applier ? 1 : 0; |
219 | mutex_unlock(lock: &rdev->gem.mutex); |
220 | } |
221 | |
222 | /* |
223 | * Userspace get information ioctl |
224 | */ |
225 | /** |
226 | * radeon_info_ioctl - answer a device specific request. |
227 | * |
228 | * @dev: drm device pointer |
229 | * @data: request object |
230 | * @filp: drm filp |
231 | * |
232 | * This function is used to pass device specific parameters to the userspace |
233 | * drivers. Examples include: pci device id, pipeline parms, tiling params, |
234 | * etc. (all asics). |
235 | * Returns 0 on success, -EINVAL on failure. |
236 | */ |
237 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
238 | { |
239 | struct radeon_device *rdev = dev->dev_private; |
240 | struct drm_radeon_info *info = data; |
241 | struct radeon_mode_info *minfo = &rdev->mode_info; |
242 | uint32_t *value, value_tmp, *value_ptr, value_size; |
243 | struct ttm_resource_manager *man; |
244 | uint64_t value64; |
245 | struct drm_crtc *crtc; |
246 | int i, found; |
247 | |
248 | value_ptr = (uint32_t *)((unsigned long)info->value); |
249 | value = &value_tmp; |
250 | value_size = sizeof(uint32_t); |
251 | |
252 | switch (info->request) { |
253 | case RADEON_INFO_DEVICE_ID: |
254 | *value = to_pci_dev(dev->dev)->device; |
255 | break; |
256 | case RADEON_INFO_NUM_GB_PIPES: |
257 | *value = rdev->num_gb_pipes; |
258 | break; |
259 | case RADEON_INFO_NUM_Z_PIPES: |
260 | *value = rdev->num_z_pipes; |
261 | break; |
262 | case RADEON_INFO_ACCEL_WORKING: |
263 | /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ |
264 | if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) |
265 | *value = false; |
266 | else |
267 | *value = rdev->accel_working; |
268 | break; |
269 | case RADEON_INFO_CRTC_FROM_ID: |
270 | if (copy_from_user(to: value, from: value_ptr, n: sizeof(uint32_t))) { |
271 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
272 | return -EFAULT; |
273 | } |
274 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { |
275 | crtc = (struct drm_crtc *)minfo->crtcs[i]; |
276 | if (crtc && crtc->base.id == *value) { |
277 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
278 | *value = radeon_crtc->crtc_id; |
279 | found = 1; |
280 | break; |
281 | } |
282 | } |
283 | if (!found) { |
284 | DRM_DEBUG_KMS("unknown crtc id %d\n" , *value); |
285 | return -EINVAL; |
286 | } |
287 | break; |
288 | case RADEON_INFO_ACCEL_WORKING2: |
289 | if (rdev->family == CHIP_HAWAII) { |
290 | if (rdev->accel_working) { |
291 | if (rdev->new_fw) |
292 | *value = 3; |
293 | else |
294 | *value = 2; |
295 | } else { |
296 | *value = 0; |
297 | } |
298 | } else { |
299 | *value = rdev->accel_working; |
300 | } |
301 | break; |
302 | case RADEON_INFO_TILING_CONFIG: |
303 | if (rdev->family >= CHIP_BONAIRE) |
304 | *value = rdev->config.cik.tile_config; |
305 | else if (rdev->family >= CHIP_TAHITI) |
306 | *value = rdev->config.si.tile_config; |
307 | else if (rdev->family >= CHIP_CAYMAN) |
308 | *value = rdev->config.cayman.tile_config; |
309 | else if (rdev->family >= CHIP_CEDAR) |
310 | *value = rdev->config.evergreen.tile_config; |
311 | else if (rdev->family >= CHIP_RV770) |
312 | *value = rdev->config.rv770.tile_config; |
313 | else if (rdev->family >= CHIP_R600) |
314 | *value = rdev->config.r600.tile_config; |
315 | else { |
316 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n" ); |
317 | return -EINVAL; |
318 | } |
319 | break; |
320 | case RADEON_INFO_WANT_HYPERZ: |
321 | /* The "value" here is both an input and output parameter. |
322 | * If the input value is 1, filp requests hyper-z access. |
323 | * If the input value is 0, filp revokes its hyper-z access. |
324 | * |
325 | * When returning, the value is 1 if filp owns hyper-z access, |
326 | * 0 otherwise. */ |
327 | if (copy_from_user(to: value, from: value_ptr, n: sizeof(uint32_t))) { |
328 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
329 | return -EFAULT; |
330 | } |
331 | if (*value >= 2) { |
332 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n" , *value); |
333 | return -EINVAL; |
334 | } |
335 | radeon_set_filp_rights(dev, owner: &rdev->hyperz_filp, applier: filp, value); |
336 | break; |
337 | case RADEON_INFO_WANT_CMASK: |
338 | /* The same logic as Hyper-Z. */ |
339 | if (copy_from_user(to: value, from: value_ptr, n: sizeof(uint32_t))) { |
340 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
341 | return -EFAULT; |
342 | } |
343 | if (*value >= 2) { |
344 | DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n" , *value); |
345 | return -EINVAL; |
346 | } |
347 | radeon_set_filp_rights(dev, owner: &rdev->cmask_filp, applier: filp, value); |
348 | break; |
349 | case RADEON_INFO_CLOCK_CRYSTAL_FREQ: |
350 | /* return clock value in KHz */ |
351 | if (rdev->asic->get_xclk) |
352 | *value = radeon_get_xclk(rdev) * 10; |
353 | else |
354 | *value = rdev->clock.spll.reference_freq * 10; |
355 | break; |
356 | case RADEON_INFO_NUM_BACKENDS: |
357 | if (rdev->family >= CHIP_BONAIRE) |
358 | *value = rdev->config.cik.max_backends_per_se * |
359 | rdev->config.cik.max_shader_engines; |
360 | else if (rdev->family >= CHIP_TAHITI) |
361 | *value = rdev->config.si.max_backends_per_se * |
362 | rdev->config.si.max_shader_engines; |
363 | else if (rdev->family >= CHIP_CAYMAN) |
364 | *value = rdev->config.cayman.max_backends_per_se * |
365 | rdev->config.cayman.max_shader_engines; |
366 | else if (rdev->family >= CHIP_CEDAR) |
367 | *value = rdev->config.evergreen.max_backends; |
368 | else if (rdev->family >= CHIP_RV770) |
369 | *value = rdev->config.rv770.max_backends; |
370 | else if (rdev->family >= CHIP_R600) |
371 | *value = rdev->config.r600.max_backends; |
372 | else { |
373 | return -EINVAL; |
374 | } |
375 | break; |
376 | case RADEON_INFO_NUM_TILE_PIPES: |
377 | if (rdev->family >= CHIP_BONAIRE) |
378 | *value = rdev->config.cik.max_tile_pipes; |
379 | else if (rdev->family >= CHIP_TAHITI) |
380 | *value = rdev->config.si.max_tile_pipes; |
381 | else if (rdev->family >= CHIP_CAYMAN) |
382 | *value = rdev->config.cayman.max_tile_pipes; |
383 | else if (rdev->family >= CHIP_CEDAR) |
384 | *value = rdev->config.evergreen.max_tile_pipes; |
385 | else if (rdev->family >= CHIP_RV770) |
386 | *value = rdev->config.rv770.max_tile_pipes; |
387 | else if (rdev->family >= CHIP_R600) |
388 | *value = rdev->config.r600.max_tile_pipes; |
389 | else { |
390 | return -EINVAL; |
391 | } |
392 | break; |
393 | case RADEON_INFO_FUSION_GART_WORKING: |
394 | *value = 1; |
395 | break; |
396 | case RADEON_INFO_BACKEND_MAP: |
397 | if (rdev->family >= CHIP_BONAIRE) |
398 | *value = rdev->config.cik.backend_map; |
399 | else if (rdev->family >= CHIP_TAHITI) |
400 | *value = rdev->config.si.backend_map; |
401 | else if (rdev->family >= CHIP_CAYMAN) |
402 | *value = rdev->config.cayman.backend_map; |
403 | else if (rdev->family >= CHIP_CEDAR) |
404 | *value = rdev->config.evergreen.backend_map; |
405 | else if (rdev->family >= CHIP_RV770) |
406 | *value = rdev->config.rv770.backend_map; |
407 | else if (rdev->family >= CHIP_R600) |
408 | *value = rdev->config.r600.backend_map; |
409 | else { |
410 | return -EINVAL; |
411 | } |
412 | break; |
413 | case RADEON_INFO_VA_START: |
414 | /* this is where we report if vm is supported or not */ |
415 | if (rdev->family < CHIP_CAYMAN) |
416 | return -EINVAL; |
417 | *value = RADEON_VA_RESERVED_SIZE; |
418 | break; |
419 | case RADEON_INFO_IB_VM_MAX_SIZE: |
420 | /* this is where we report if vm is supported or not */ |
421 | if (rdev->family < CHIP_CAYMAN) |
422 | return -EINVAL; |
423 | *value = RADEON_IB_VM_MAX_SIZE; |
424 | break; |
425 | case RADEON_INFO_MAX_PIPES: |
426 | if (rdev->family >= CHIP_BONAIRE) |
427 | *value = rdev->config.cik.max_cu_per_sh; |
428 | else if (rdev->family >= CHIP_TAHITI) |
429 | *value = rdev->config.si.max_cu_per_sh; |
430 | else if (rdev->family >= CHIP_CAYMAN) |
431 | *value = rdev->config.cayman.max_pipes_per_simd; |
432 | else if (rdev->family >= CHIP_CEDAR) |
433 | *value = rdev->config.evergreen.max_pipes; |
434 | else if (rdev->family >= CHIP_RV770) |
435 | *value = rdev->config.rv770.max_pipes; |
436 | else if (rdev->family >= CHIP_R600) |
437 | *value = rdev->config.r600.max_pipes; |
438 | else { |
439 | return -EINVAL; |
440 | } |
441 | break; |
442 | case RADEON_INFO_TIMESTAMP: |
443 | if (rdev->family < CHIP_R600) { |
444 | DRM_DEBUG_KMS("timestamp is r6xx+ only!\n" ); |
445 | return -EINVAL; |
446 | } |
447 | value = (uint32_t *)&value64; |
448 | value_size = sizeof(uint64_t); |
449 | value64 = radeon_get_gpu_clock_counter(rdev); |
450 | break; |
451 | case RADEON_INFO_MAX_SE: |
452 | if (rdev->family >= CHIP_BONAIRE) |
453 | *value = rdev->config.cik.max_shader_engines; |
454 | else if (rdev->family >= CHIP_TAHITI) |
455 | *value = rdev->config.si.max_shader_engines; |
456 | else if (rdev->family >= CHIP_CAYMAN) |
457 | *value = rdev->config.cayman.max_shader_engines; |
458 | else if (rdev->family >= CHIP_CEDAR) |
459 | *value = rdev->config.evergreen.num_ses; |
460 | else |
461 | *value = 1; |
462 | break; |
463 | case RADEON_INFO_MAX_SH_PER_SE: |
464 | if (rdev->family >= CHIP_BONAIRE) |
465 | *value = rdev->config.cik.max_sh_per_se; |
466 | else if (rdev->family >= CHIP_TAHITI) |
467 | *value = rdev->config.si.max_sh_per_se; |
468 | else |
469 | return -EINVAL; |
470 | break; |
471 | case RADEON_INFO_FASTFB_WORKING: |
472 | *value = rdev->fastfb_working; |
473 | break; |
474 | case RADEON_INFO_RING_WORKING: |
475 | if (copy_from_user(to: value, from: value_ptr, n: sizeof(uint32_t))) { |
476 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
477 | return -EFAULT; |
478 | } |
479 | switch (*value) { |
480 | case RADEON_CS_RING_GFX: |
481 | case RADEON_CS_RING_COMPUTE: |
482 | *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready; |
483 | break; |
484 | case RADEON_CS_RING_DMA: |
485 | *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready; |
486 | *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready; |
487 | break; |
488 | case RADEON_CS_RING_UVD: |
489 | *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; |
490 | break; |
491 | case RADEON_CS_RING_VCE: |
492 | *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; |
493 | break; |
494 | default: |
495 | return -EINVAL; |
496 | } |
497 | break; |
498 | case RADEON_INFO_SI_TILE_MODE_ARRAY: |
499 | if (rdev->family >= CHIP_BONAIRE) { |
500 | value = rdev->config.cik.tile_mode_array; |
501 | value_size = sizeof(uint32_t)*32; |
502 | } else if (rdev->family >= CHIP_TAHITI) { |
503 | value = rdev->config.si.tile_mode_array; |
504 | value_size = sizeof(uint32_t)*32; |
505 | } else { |
506 | DRM_DEBUG_KMS("tile mode array is si+ only!\n" ); |
507 | return -EINVAL; |
508 | } |
509 | break; |
510 | case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: |
511 | if (rdev->family >= CHIP_BONAIRE) { |
512 | value = rdev->config.cik.macrotile_mode_array; |
513 | value_size = sizeof(uint32_t)*16; |
514 | } else { |
515 | DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n" ); |
516 | return -EINVAL; |
517 | } |
518 | break; |
519 | case RADEON_INFO_SI_CP_DMA_COMPUTE: |
520 | *value = 1; |
521 | break; |
522 | case RADEON_INFO_SI_BACKEND_ENABLED_MASK: |
523 | if (rdev->family >= CHIP_BONAIRE) { |
524 | *value = rdev->config.cik.backend_enable_mask; |
525 | } else if (rdev->family >= CHIP_TAHITI) { |
526 | *value = rdev->config.si.backend_enable_mask; |
527 | } else { |
528 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n" ); |
529 | return -EINVAL; |
530 | } |
531 | break; |
532 | case RADEON_INFO_MAX_SCLK: |
533 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && |
534 | rdev->pm.dpm_enabled) |
535 | *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; |
536 | else |
537 | *value = rdev->pm.default_sclk * 10; |
538 | break; |
539 | case RADEON_INFO_VCE_FW_VERSION: |
540 | *value = rdev->vce.fw_version; |
541 | break; |
542 | case RADEON_INFO_VCE_FB_VERSION: |
543 | *value = rdev->vce.fb_version; |
544 | break; |
545 | case RADEON_INFO_NUM_BYTES_MOVED: |
546 | value = (uint32_t *)&value64; |
547 | value_size = sizeof(uint64_t); |
548 | value64 = atomic64_read(v: &rdev->num_bytes_moved); |
549 | break; |
550 | case RADEON_INFO_VRAM_USAGE: |
551 | value = (uint32_t *)&value64; |
552 | value_size = sizeof(uint64_t); |
553 | man = ttm_manager_type(bdev: &rdev->mman.bdev, TTM_PL_VRAM); |
554 | value64 = ttm_resource_manager_usage(man); |
555 | break; |
556 | case RADEON_INFO_GTT_USAGE: |
557 | value = (uint32_t *)&value64; |
558 | value_size = sizeof(uint64_t); |
559 | man = ttm_manager_type(bdev: &rdev->mman.bdev, TTM_PL_TT); |
560 | value64 = ttm_resource_manager_usage(man); |
561 | break; |
562 | case RADEON_INFO_ACTIVE_CU_COUNT: |
563 | if (rdev->family >= CHIP_BONAIRE) |
564 | *value = rdev->config.cik.active_cus; |
565 | else if (rdev->family >= CHIP_TAHITI) |
566 | *value = rdev->config.si.active_cus; |
567 | else if (rdev->family >= CHIP_CAYMAN) |
568 | *value = rdev->config.cayman.active_simds; |
569 | else if (rdev->family >= CHIP_CEDAR) |
570 | *value = rdev->config.evergreen.active_simds; |
571 | else if (rdev->family >= CHIP_RV770) |
572 | *value = rdev->config.rv770.active_simds; |
573 | else if (rdev->family >= CHIP_R600) |
574 | *value = rdev->config.r600.active_simds; |
575 | else |
576 | *value = 1; |
577 | break; |
578 | case RADEON_INFO_CURRENT_GPU_TEMP: |
579 | /* get temperature in millidegrees C */ |
580 | if (rdev->asic->pm.get_temperature) |
581 | *value = radeon_get_temperature(rdev); |
582 | else |
583 | *value = 0; |
584 | break; |
585 | case RADEON_INFO_CURRENT_GPU_SCLK: |
586 | /* get sclk in Mhz */ |
587 | if (rdev->pm.dpm_enabled) |
588 | *value = radeon_dpm_get_current_sclk(rdev) / 100; |
589 | else |
590 | *value = rdev->pm.current_sclk / 100; |
591 | break; |
592 | case RADEON_INFO_CURRENT_GPU_MCLK: |
593 | /* get mclk in Mhz */ |
594 | if (rdev->pm.dpm_enabled) |
595 | *value = radeon_dpm_get_current_mclk(rdev) / 100; |
596 | else |
597 | *value = rdev->pm.current_mclk / 100; |
598 | break; |
599 | case RADEON_INFO_READ_REG: |
600 | if (copy_from_user(to: value, from: value_ptr, n: sizeof(uint32_t))) { |
601 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
602 | return -EFAULT; |
603 | } |
604 | if (radeon_get_allowed_info_register(rdev, *value, value)) |
605 | return -EINVAL; |
606 | break; |
607 | case RADEON_INFO_VA_UNMAP_WORKING: |
608 | *value = true; |
609 | break; |
610 | case RADEON_INFO_GPU_RESET_COUNTER: |
611 | *value = atomic_read(v: &rdev->gpu_reset_counter); |
612 | break; |
613 | default: |
614 | DRM_DEBUG_KMS("Invalid request %d\n" , info->request); |
615 | return -EINVAL; |
616 | } |
617 | if (copy_to_user(to: value_ptr, from: (char *)value, n: value_size)) { |
618 | DRM_ERROR("copy_to_user %s:%u\n" , __func__, __LINE__); |
619 | return -EFAULT; |
620 | } |
621 | return 0; |
622 | } |
623 | |
624 | /** |
625 | * radeon_driver_open_kms - drm callback for open |
626 | * |
627 | * @dev: drm dev pointer |
628 | * @file_priv: drm file |
629 | * |
630 | * On device open, init vm on cayman+ (all asics). |
631 | * Returns 0 on success, error on failure. |
632 | */ |
633 | int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) |
634 | { |
635 | struct radeon_device *rdev = dev->dev_private; |
636 | struct radeon_fpriv *fpriv; |
637 | struct radeon_vm *vm; |
638 | int r; |
639 | |
640 | file_priv->driver_priv = NULL; |
641 | |
642 | r = pm_runtime_get_sync(dev: dev->dev); |
643 | if (r < 0) { |
644 | pm_runtime_put_autosuspend(dev: dev->dev); |
645 | return r; |
646 | } |
647 | |
648 | /* new gpu have virtual address space support */ |
649 | if (rdev->family >= CHIP_CAYMAN) { |
650 | |
651 | fpriv = kzalloc(size: sizeof(*fpriv), GFP_KERNEL); |
652 | if (unlikely(!fpriv)) { |
653 | r = -ENOMEM; |
654 | goto err_suspend; |
655 | } |
656 | |
657 | if (rdev->accel_working) { |
658 | vm = &fpriv->vm; |
659 | r = radeon_vm_init(rdev, vm); |
660 | if (r) |
661 | goto err_fpriv; |
662 | |
663 | r = radeon_bo_reserve(bo: rdev->ring_tmp_bo.bo, no_intr: false); |
664 | if (r) |
665 | goto err_vm_fini; |
666 | |
667 | /* map the ib pool buffer read only into |
668 | * virtual address space */ |
669 | vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, |
670 | bo: rdev->ring_tmp_bo.bo); |
671 | if (!vm->ib_bo_va) { |
672 | r = -ENOMEM; |
673 | goto err_vm_fini; |
674 | } |
675 | |
676 | r = radeon_vm_bo_set_addr(rdev, bo_va: vm->ib_bo_va, |
677 | RADEON_VA_IB_OFFSET, |
678 | RADEON_VM_PAGE_READABLE | |
679 | RADEON_VM_PAGE_SNOOPED); |
680 | if (r) |
681 | goto err_vm_fini; |
682 | } |
683 | file_priv->driver_priv = fpriv; |
684 | } |
685 | |
686 | pm_runtime_mark_last_busy(dev: dev->dev); |
687 | pm_runtime_put_autosuspend(dev: dev->dev); |
688 | return 0; |
689 | |
690 | err_vm_fini: |
691 | radeon_vm_fini(rdev, vm); |
692 | err_fpriv: |
693 | kfree(objp: fpriv); |
694 | |
695 | err_suspend: |
696 | pm_runtime_mark_last_busy(dev: dev->dev); |
697 | pm_runtime_put_autosuspend(dev: dev->dev); |
698 | return r; |
699 | } |
700 | |
701 | /** |
702 | * radeon_driver_postclose_kms - drm callback for post close |
703 | * |
704 | * @dev: drm dev pointer |
705 | * @file_priv: drm file |
706 | * |
707 | * On device close, tear down hyperz and cmask filps on r1xx-r5xx |
708 | * (all asics). And tear down vm on cayman+ (all asics). |
709 | */ |
710 | void radeon_driver_postclose_kms(struct drm_device *dev, |
711 | struct drm_file *file_priv) |
712 | { |
713 | struct radeon_device *rdev = dev->dev_private; |
714 | |
715 | pm_runtime_get_sync(dev: dev->dev); |
716 | |
717 | mutex_lock(&rdev->gem.mutex); |
718 | if (rdev->hyperz_filp == file_priv) |
719 | rdev->hyperz_filp = NULL; |
720 | if (rdev->cmask_filp == file_priv) |
721 | rdev->cmask_filp = NULL; |
722 | mutex_unlock(lock: &rdev->gem.mutex); |
723 | |
724 | radeon_uvd_free_handles(rdev, filp: file_priv); |
725 | radeon_vce_free_handles(rdev, filp: file_priv); |
726 | |
727 | /* new gpu have virtual address space support */ |
728 | if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { |
729 | struct radeon_fpriv *fpriv = file_priv->driver_priv; |
730 | struct radeon_vm *vm = &fpriv->vm; |
731 | int r; |
732 | |
733 | if (rdev->accel_working) { |
734 | r = radeon_bo_reserve(bo: rdev->ring_tmp_bo.bo, no_intr: false); |
735 | if (!r) { |
736 | if (vm->ib_bo_va) |
737 | radeon_vm_bo_rmv(rdev, bo_va: vm->ib_bo_va); |
738 | radeon_bo_unreserve(bo: rdev->ring_tmp_bo.bo); |
739 | } |
740 | radeon_vm_fini(rdev, vm); |
741 | } |
742 | |
743 | kfree(objp: fpriv); |
744 | file_priv->driver_priv = NULL; |
745 | } |
746 | pm_runtime_mark_last_busy(dev: dev->dev); |
747 | pm_runtime_put_autosuspend(dev: dev->dev); |
748 | } |
749 | |
750 | /* |
751 | * VBlank related functions. |
752 | */ |
753 | /** |
754 | * radeon_get_vblank_counter_kms - get frame count |
755 | * |
756 | * @crtc: crtc to get the frame count from |
757 | * |
758 | * Gets the frame count on the requested crtc (all asics). |
759 | * Returns frame count on success, -EINVAL on failure. |
760 | */ |
761 | u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc) |
762 | { |
763 | struct drm_device *dev = crtc->dev; |
764 | unsigned int pipe = crtc->index; |
765 | int vpos, hpos, stat; |
766 | u32 count; |
767 | struct radeon_device *rdev = dev->dev_private; |
768 | |
769 | if (pipe >= rdev->num_crtc) { |
770 | DRM_ERROR("Invalid crtc %u\n" , pipe); |
771 | return -EINVAL; |
772 | } |
773 | |
774 | /* The hw increments its frame counter at start of vsync, not at start |
775 | * of vblank, as is required by DRM core vblank counter handling. |
776 | * Cook the hw count here to make it appear to the caller as if it |
777 | * incremented at start of vblank. We measure distance to start of |
778 | * vblank in vpos. vpos therefore will be >= 0 between start of vblank |
779 | * and start of vsync, so vpos >= 0 means to bump the hw frame counter |
780 | * result by 1 to give the proper appearance to caller. |
781 | */ |
782 | if (rdev->mode_info.crtcs[pipe]) { |
783 | /* Repeat readout if needed to provide stable result if |
784 | * we cross start of vsync during the queries. |
785 | */ |
786 | do { |
787 | count = radeon_get_vblank_counter(rdev, pipe); |
788 | /* Ask radeon_get_crtc_scanoutpos to return vpos as |
789 | * distance to start of vblank, instead of regular |
790 | * vertical scanout pos. |
791 | */ |
792 | stat = radeon_get_crtc_scanoutpos( |
793 | dev, pipe, GET_DISTANCE_TO_VBLANKSTART, |
794 | vpos: &vpos, hpos: &hpos, NULL, NULL, |
795 | mode: &rdev->mode_info.crtcs[pipe]->base.hwmode); |
796 | } while (count != radeon_get_vblank_counter(rdev, pipe)); |
797 | |
798 | if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != |
799 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { |
800 | DRM_DEBUG_VBL("Query failed! stat %d\n" , stat); |
801 | } |
802 | else { |
803 | DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n" , |
804 | pipe, vpos); |
805 | |
806 | /* Bump counter if we are at >= leading edge of vblank, |
807 | * but before vsync where vpos would turn negative and |
808 | * the hw counter really increments. |
809 | */ |
810 | if (vpos >= 0) |
811 | count++; |
812 | } |
813 | } |
814 | else { |
815 | /* Fallback to use value as is. */ |
816 | count = radeon_get_vblank_counter(rdev, pipe); |
817 | DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n" ); |
818 | } |
819 | |
820 | return count; |
821 | } |
822 | |
823 | /** |
824 | * radeon_enable_vblank_kms - enable vblank interrupt |
825 | * |
826 | * @crtc: crtc to enable vblank interrupt for |
827 | * |
828 | * Enable the interrupt on the requested crtc (all asics). |
829 | * Returns 0 on success, -EINVAL on failure. |
830 | */ |
831 | int radeon_enable_vblank_kms(struct drm_crtc *crtc) |
832 | { |
833 | struct drm_device *dev = crtc->dev; |
834 | unsigned int pipe = crtc->index; |
835 | struct radeon_device *rdev = dev->dev_private; |
836 | unsigned long irqflags; |
837 | int r; |
838 | |
839 | if (pipe >= rdev->num_crtc) { |
840 | DRM_ERROR("Invalid crtc %d\n" , pipe); |
841 | return -EINVAL; |
842 | } |
843 | |
844 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
845 | rdev->irq.crtc_vblank_int[pipe] = true; |
846 | r = radeon_irq_set(rdev); |
847 | spin_unlock_irqrestore(lock: &rdev->irq.lock, flags: irqflags); |
848 | return r; |
849 | } |
850 | |
851 | /** |
852 | * radeon_disable_vblank_kms - disable vblank interrupt |
853 | * |
854 | * @crtc: crtc to disable vblank interrupt for |
855 | * |
856 | * Disable the interrupt on the requested crtc (all asics). |
857 | */ |
858 | void radeon_disable_vblank_kms(struct drm_crtc *crtc) |
859 | { |
860 | struct drm_device *dev = crtc->dev; |
861 | unsigned int pipe = crtc->index; |
862 | struct radeon_device *rdev = dev->dev_private; |
863 | unsigned long irqflags; |
864 | |
865 | if (pipe >= rdev->num_crtc) { |
866 | DRM_ERROR("Invalid crtc %d\n" , pipe); |
867 | return; |
868 | } |
869 | |
870 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
871 | rdev->irq.crtc_vblank_int[pipe] = false; |
872 | radeon_irq_set(rdev); |
873 | spin_unlock_irqrestore(lock: &rdev->irq.lock, flags: irqflags); |
874 | } |
875 | |