1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | #include <drm/drmP.h> |
29 | #include <drm/drm_fb_helper.h> |
30 | #include "radeon.h" |
31 | #include <drm/radeon_drm.h> |
32 | #include "radeon_asic.h" |
33 | |
34 | #include <linux/vga_switcheroo.h> |
35 | #include <linux/slab.h> |
36 | #include <linux/pm_runtime.h> |
37 | |
#if defined(CONFIG_VGA_SWITCHEROO)
/* ATPX is the ACPI method used on hybrid-graphics (PowerXpress) laptops to
 * power the discrete GPU on/off; presumably implemented alongside the
 * vga_switcheroo handler — only meaningful when switcheroo is built in. */
bool radeon_has_atpx(void);
#else
/* Without vga_switcheroo there is no ATPX support; stub always says no. */
static inline bool radeon_has_atpx(void) { return false; }
#endif
43 | |
44 | /** |
45 | * radeon_driver_unload_kms - Main unload function for KMS. |
46 | * |
47 | * @dev: drm dev pointer |
48 | * |
49 | * This is the main unload function for KMS (all asics). |
50 | * It calls radeon_modeset_fini() to tear down the |
51 | * displays, and radeon_device_fini() to tear down |
52 | * the rest of the device (CP, writeback, etc.). |
53 | * Returns 0 on success. |
54 | */ |
55 | void radeon_driver_unload_kms(struct drm_device *dev) |
56 | { |
57 | struct radeon_device *rdev = dev->dev_private; |
58 | |
59 | if (rdev == NULL) |
60 | return; |
61 | |
62 | if (rdev->rmmio == NULL) |
63 | goto done_free; |
64 | |
65 | if (radeon_is_px(dev)) { |
66 | pm_runtime_get_sync(dev->dev); |
67 | pm_runtime_forbid(dev->dev); |
68 | } |
69 | |
70 | radeon_acpi_fini(rdev); |
71 | |
72 | radeon_modeset_fini(rdev); |
73 | radeon_device_fini(rdev); |
74 | |
75 | done_free: |
76 | kfree(rdev); |
77 | dev->dev_private = NULL; |
78 | } |
79 | |
80 | /** |
81 | * radeon_driver_load_kms - Main load function for KMS. |
82 | * |
83 | * @dev: drm dev pointer |
84 | * @flags: device flags |
85 | * |
86 | * This is the main load function for KMS (all asics). |
87 | * It calls radeon_device_init() to set up the non-display |
88 | * parts of the chip (asic init, CP, writeback, etc.), and |
89 | * radeon_modeset_init() to set up the display parts |
90 | * (crtcs, encoders, hotplug detect, etc.). |
91 | * Returns 0 on success, error on failure. |
92 | */ |
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	/* Honor the radeon.si_support=0 module parameter: refuse to bind to
	 * Southern Islands parts (presumably so amdgpu can drive them). */
	if (!radeon_si_support) {
		switch (flags & RADEON_FAMILY_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support disabled by module param\n" );
			return -ENODEV;
		}
	}
	/* Same opt-out for Sea Islands parts via radeon.cik_support=0. */
	if (!radeon_cik_support) {
		switch (flags & RADEON_FAMILY_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support disabled by module param\n" );
			return -ENODEV;
		}
	}

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* Dynamic power management (PX) only for discrete GPUs that have
	 * ATPX, opted in via radeon.runpm, and are not Thunderbolt eGPUs. */
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= RADEON_IS_PX;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n" );
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n" );

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n" );
	}

	/* Hand the (now fully initialized) device over to runtime PM with a
	 * 5 second autosuspend delay; the put below balances the implicit
	 * active reference so the GPU may power down when idle. */
	if (radeon_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	/* On any fatal error, unwind everything set up above. */
	if (r)
		radeon_driver_unload_kms(dev);


	return r;
}
191 | |
192 | /** |
193 | * radeon_set_filp_rights - Set filp right. |
194 | * |
195 | * @dev: drm dev pointer |
196 | * @owner: drm file |
197 | * @applier: drm file |
198 | * @value: value |
199 | * |
200 | * Sets the filp rights for the device (all asics). |
201 | */ |
202 | static void radeon_set_filp_rights(struct drm_device *dev, |
203 | struct drm_file **owner, |
204 | struct drm_file *applier, |
205 | uint32_t *value) |
206 | { |
207 | struct radeon_device *rdev = dev->dev_private; |
208 | |
209 | mutex_lock(&rdev->gem.mutex); |
210 | if (*value == 1) { |
211 | /* wants rights */ |
212 | if (!*owner) |
213 | *owner = applier; |
214 | } else if (*value == 0) { |
215 | /* revokes rights */ |
216 | if (*owner == applier) |
217 | *owner = NULL; |
218 | } |
219 | *value = *owner == applier ? 1 : 0; |
220 | mutex_unlock(&rdev->gem.mutex); |
221 | } |
222 | |
223 | /* |
224 | * Userspace get information ioctl |
225 | */ |
226 | /** |
227 | * radeon_info_ioctl - answer a device specific request. |
228 | * |
229 | * @rdev: radeon device pointer |
230 | * @data: request object |
231 | * @filp: drm filp |
232 | * |
233 | * This function is used to pass device specific parameters to the userspace |
234 | * drivers. Examples include: pci device id, pipeline parms, tiling params, |
235 | * etc. (all asics). |
236 | * Returns 0 on success, -EINVAL on failure. |
237 | */ |
238 | static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
239 | { |
240 | struct radeon_device *rdev = dev->dev_private; |
241 | struct drm_radeon_info *info = data; |
242 | struct radeon_mode_info *minfo = &rdev->mode_info; |
243 | uint32_t *value, value_tmp, *value_ptr, value_size; |
244 | uint64_t value64; |
245 | struct drm_crtc *crtc; |
246 | int i, found; |
247 | |
248 | value_ptr = (uint32_t *)((unsigned long)info->value); |
249 | value = &value_tmp; |
250 | value_size = sizeof(uint32_t); |
251 | |
252 | switch (info->request) { |
253 | case RADEON_INFO_DEVICE_ID: |
254 | *value = dev->pdev->device; |
255 | break; |
256 | case RADEON_INFO_NUM_GB_PIPES: |
257 | *value = rdev->num_gb_pipes; |
258 | break; |
259 | case RADEON_INFO_NUM_Z_PIPES: |
260 | *value = rdev->num_z_pipes; |
261 | break; |
262 | case RADEON_INFO_ACCEL_WORKING: |
263 | /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ |
264 | if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) |
265 | *value = false; |
266 | else |
267 | *value = rdev->accel_working; |
268 | break; |
269 | case RADEON_INFO_CRTC_FROM_ID: |
270 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
271 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
272 | return -EFAULT; |
273 | } |
274 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { |
275 | crtc = (struct drm_crtc *)minfo->crtcs[i]; |
276 | if (crtc && crtc->base.id == *value) { |
277 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
278 | *value = radeon_crtc->crtc_id; |
279 | found = 1; |
280 | break; |
281 | } |
282 | } |
283 | if (!found) { |
284 | DRM_DEBUG_KMS("unknown crtc id %d\n" , *value); |
285 | return -EINVAL; |
286 | } |
287 | break; |
288 | case RADEON_INFO_ACCEL_WORKING2: |
289 | if (rdev->family == CHIP_HAWAII) { |
290 | if (rdev->accel_working) { |
291 | if (rdev->new_fw) |
292 | *value = 3; |
293 | else |
294 | *value = 2; |
295 | } else { |
296 | *value = 0; |
297 | } |
298 | } else { |
299 | *value = rdev->accel_working; |
300 | } |
301 | break; |
302 | case RADEON_INFO_TILING_CONFIG: |
303 | if (rdev->family >= CHIP_BONAIRE) |
304 | *value = rdev->config.cik.tile_config; |
305 | else if (rdev->family >= CHIP_TAHITI) |
306 | *value = rdev->config.si.tile_config; |
307 | else if (rdev->family >= CHIP_CAYMAN) |
308 | *value = rdev->config.cayman.tile_config; |
309 | else if (rdev->family >= CHIP_CEDAR) |
310 | *value = rdev->config.evergreen.tile_config; |
311 | else if (rdev->family >= CHIP_RV770) |
312 | *value = rdev->config.rv770.tile_config; |
313 | else if (rdev->family >= CHIP_R600) |
314 | *value = rdev->config.r600.tile_config; |
315 | else { |
316 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n" ); |
317 | return -EINVAL; |
318 | } |
319 | break; |
320 | case RADEON_INFO_WANT_HYPERZ: |
321 | /* The "value" here is both an input and output parameter. |
322 | * If the input value is 1, filp requests hyper-z access. |
323 | * If the input value is 0, filp revokes its hyper-z access. |
324 | * |
325 | * When returning, the value is 1 if filp owns hyper-z access, |
326 | * 0 otherwise. */ |
327 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
328 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
329 | return -EFAULT; |
330 | } |
331 | if (*value >= 2) { |
332 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n" , *value); |
333 | return -EINVAL; |
334 | } |
335 | radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value); |
336 | break; |
337 | case RADEON_INFO_WANT_CMASK: |
338 | /* The same logic as Hyper-Z. */ |
339 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
340 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
341 | return -EFAULT; |
342 | } |
343 | if (*value >= 2) { |
344 | DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n" , *value); |
345 | return -EINVAL; |
346 | } |
347 | radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value); |
348 | break; |
349 | case RADEON_INFO_CLOCK_CRYSTAL_FREQ: |
350 | /* return clock value in KHz */ |
351 | if (rdev->asic->get_xclk) |
352 | *value = radeon_get_xclk(rdev) * 10; |
353 | else |
354 | *value = rdev->clock.spll.reference_freq * 10; |
355 | break; |
356 | case RADEON_INFO_NUM_BACKENDS: |
357 | if (rdev->family >= CHIP_BONAIRE) |
358 | *value = rdev->config.cik.max_backends_per_se * |
359 | rdev->config.cik.max_shader_engines; |
360 | else if (rdev->family >= CHIP_TAHITI) |
361 | *value = rdev->config.si.max_backends_per_se * |
362 | rdev->config.si.max_shader_engines; |
363 | else if (rdev->family >= CHIP_CAYMAN) |
364 | *value = rdev->config.cayman.max_backends_per_se * |
365 | rdev->config.cayman.max_shader_engines; |
366 | else if (rdev->family >= CHIP_CEDAR) |
367 | *value = rdev->config.evergreen.max_backends; |
368 | else if (rdev->family >= CHIP_RV770) |
369 | *value = rdev->config.rv770.max_backends; |
370 | else if (rdev->family >= CHIP_R600) |
371 | *value = rdev->config.r600.max_backends; |
372 | else { |
373 | return -EINVAL; |
374 | } |
375 | break; |
376 | case RADEON_INFO_NUM_TILE_PIPES: |
377 | if (rdev->family >= CHIP_BONAIRE) |
378 | *value = rdev->config.cik.max_tile_pipes; |
379 | else if (rdev->family >= CHIP_TAHITI) |
380 | *value = rdev->config.si.max_tile_pipes; |
381 | else if (rdev->family >= CHIP_CAYMAN) |
382 | *value = rdev->config.cayman.max_tile_pipes; |
383 | else if (rdev->family >= CHIP_CEDAR) |
384 | *value = rdev->config.evergreen.max_tile_pipes; |
385 | else if (rdev->family >= CHIP_RV770) |
386 | *value = rdev->config.rv770.max_tile_pipes; |
387 | else if (rdev->family >= CHIP_R600) |
388 | *value = rdev->config.r600.max_tile_pipes; |
389 | else { |
390 | return -EINVAL; |
391 | } |
392 | break; |
393 | case RADEON_INFO_FUSION_GART_WORKING: |
394 | *value = 1; |
395 | break; |
396 | case RADEON_INFO_BACKEND_MAP: |
397 | if (rdev->family >= CHIP_BONAIRE) |
398 | *value = rdev->config.cik.backend_map; |
399 | else if (rdev->family >= CHIP_TAHITI) |
400 | *value = rdev->config.si.backend_map; |
401 | else if (rdev->family >= CHIP_CAYMAN) |
402 | *value = rdev->config.cayman.backend_map; |
403 | else if (rdev->family >= CHIP_CEDAR) |
404 | *value = rdev->config.evergreen.backend_map; |
405 | else if (rdev->family >= CHIP_RV770) |
406 | *value = rdev->config.rv770.backend_map; |
407 | else if (rdev->family >= CHIP_R600) |
408 | *value = rdev->config.r600.backend_map; |
409 | else { |
410 | return -EINVAL; |
411 | } |
412 | break; |
413 | case RADEON_INFO_VA_START: |
414 | /* this is where we report if vm is supported or not */ |
415 | if (rdev->family < CHIP_CAYMAN) |
416 | return -EINVAL; |
417 | *value = RADEON_VA_RESERVED_SIZE; |
418 | break; |
419 | case RADEON_INFO_IB_VM_MAX_SIZE: |
420 | /* this is where we report if vm is supported or not */ |
421 | if (rdev->family < CHIP_CAYMAN) |
422 | return -EINVAL; |
423 | *value = RADEON_IB_VM_MAX_SIZE; |
424 | break; |
425 | case RADEON_INFO_MAX_PIPES: |
426 | if (rdev->family >= CHIP_BONAIRE) |
427 | *value = rdev->config.cik.max_cu_per_sh; |
428 | else if (rdev->family >= CHIP_TAHITI) |
429 | *value = rdev->config.si.max_cu_per_sh; |
430 | else if (rdev->family >= CHIP_CAYMAN) |
431 | *value = rdev->config.cayman.max_pipes_per_simd; |
432 | else if (rdev->family >= CHIP_CEDAR) |
433 | *value = rdev->config.evergreen.max_pipes; |
434 | else if (rdev->family >= CHIP_RV770) |
435 | *value = rdev->config.rv770.max_pipes; |
436 | else if (rdev->family >= CHIP_R600) |
437 | *value = rdev->config.r600.max_pipes; |
438 | else { |
439 | return -EINVAL; |
440 | } |
441 | break; |
442 | case RADEON_INFO_TIMESTAMP: |
443 | if (rdev->family < CHIP_R600) { |
444 | DRM_DEBUG_KMS("timestamp is r6xx+ only!\n" ); |
445 | return -EINVAL; |
446 | } |
447 | value = (uint32_t*)&value64; |
448 | value_size = sizeof(uint64_t); |
449 | value64 = radeon_get_gpu_clock_counter(rdev); |
450 | break; |
451 | case RADEON_INFO_MAX_SE: |
452 | if (rdev->family >= CHIP_BONAIRE) |
453 | *value = rdev->config.cik.max_shader_engines; |
454 | else if (rdev->family >= CHIP_TAHITI) |
455 | *value = rdev->config.si.max_shader_engines; |
456 | else if (rdev->family >= CHIP_CAYMAN) |
457 | *value = rdev->config.cayman.max_shader_engines; |
458 | else if (rdev->family >= CHIP_CEDAR) |
459 | *value = rdev->config.evergreen.num_ses; |
460 | else |
461 | *value = 1; |
462 | break; |
463 | case RADEON_INFO_MAX_SH_PER_SE: |
464 | if (rdev->family >= CHIP_BONAIRE) |
465 | *value = rdev->config.cik.max_sh_per_se; |
466 | else if (rdev->family >= CHIP_TAHITI) |
467 | *value = rdev->config.si.max_sh_per_se; |
468 | else |
469 | return -EINVAL; |
470 | break; |
471 | case RADEON_INFO_FASTFB_WORKING: |
472 | *value = rdev->fastfb_working; |
473 | break; |
474 | case RADEON_INFO_RING_WORKING: |
475 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
476 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
477 | return -EFAULT; |
478 | } |
479 | switch (*value) { |
480 | case RADEON_CS_RING_GFX: |
481 | case RADEON_CS_RING_COMPUTE: |
482 | *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready; |
483 | break; |
484 | case RADEON_CS_RING_DMA: |
485 | *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready; |
486 | *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready; |
487 | break; |
488 | case RADEON_CS_RING_UVD: |
489 | *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; |
490 | break; |
491 | case RADEON_CS_RING_VCE: |
492 | *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; |
493 | break; |
494 | default: |
495 | return -EINVAL; |
496 | } |
497 | break; |
498 | case RADEON_INFO_SI_TILE_MODE_ARRAY: |
499 | if (rdev->family >= CHIP_BONAIRE) { |
500 | value = rdev->config.cik.tile_mode_array; |
501 | value_size = sizeof(uint32_t)*32; |
502 | } else if (rdev->family >= CHIP_TAHITI) { |
503 | value = rdev->config.si.tile_mode_array; |
504 | value_size = sizeof(uint32_t)*32; |
505 | } else { |
506 | DRM_DEBUG_KMS("tile mode array is si+ only!\n" ); |
507 | return -EINVAL; |
508 | } |
509 | break; |
510 | case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: |
511 | if (rdev->family >= CHIP_BONAIRE) { |
512 | value = rdev->config.cik.macrotile_mode_array; |
513 | value_size = sizeof(uint32_t)*16; |
514 | } else { |
515 | DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n" ); |
516 | return -EINVAL; |
517 | } |
518 | break; |
519 | case RADEON_INFO_SI_CP_DMA_COMPUTE: |
520 | *value = 1; |
521 | break; |
522 | case RADEON_INFO_SI_BACKEND_ENABLED_MASK: |
523 | if (rdev->family >= CHIP_BONAIRE) { |
524 | *value = rdev->config.cik.backend_enable_mask; |
525 | } else if (rdev->family >= CHIP_TAHITI) { |
526 | *value = rdev->config.si.backend_enable_mask; |
527 | } else { |
528 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n" ); |
529 | } |
530 | break; |
531 | case RADEON_INFO_MAX_SCLK: |
532 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && |
533 | rdev->pm.dpm_enabled) |
534 | *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; |
535 | else |
536 | *value = rdev->pm.default_sclk * 10; |
537 | break; |
538 | case RADEON_INFO_VCE_FW_VERSION: |
539 | *value = rdev->vce.fw_version; |
540 | break; |
541 | case RADEON_INFO_VCE_FB_VERSION: |
542 | *value = rdev->vce.fb_version; |
543 | break; |
544 | case RADEON_INFO_NUM_BYTES_MOVED: |
545 | value = (uint32_t*)&value64; |
546 | value_size = sizeof(uint64_t); |
547 | value64 = atomic64_read(&rdev->num_bytes_moved); |
548 | break; |
549 | case RADEON_INFO_VRAM_USAGE: |
550 | value = (uint32_t*)&value64; |
551 | value_size = sizeof(uint64_t); |
552 | value64 = atomic64_read(&rdev->vram_usage); |
553 | break; |
554 | case RADEON_INFO_GTT_USAGE: |
555 | value = (uint32_t*)&value64; |
556 | value_size = sizeof(uint64_t); |
557 | value64 = atomic64_read(&rdev->gtt_usage); |
558 | break; |
559 | case RADEON_INFO_ACTIVE_CU_COUNT: |
560 | if (rdev->family >= CHIP_BONAIRE) |
561 | *value = rdev->config.cik.active_cus; |
562 | else if (rdev->family >= CHIP_TAHITI) |
563 | *value = rdev->config.si.active_cus; |
564 | else if (rdev->family >= CHIP_CAYMAN) |
565 | *value = rdev->config.cayman.active_simds; |
566 | else if (rdev->family >= CHIP_CEDAR) |
567 | *value = rdev->config.evergreen.active_simds; |
568 | else if (rdev->family >= CHIP_RV770) |
569 | *value = rdev->config.rv770.active_simds; |
570 | else if (rdev->family >= CHIP_R600) |
571 | *value = rdev->config.r600.active_simds; |
572 | else |
573 | *value = 1; |
574 | break; |
575 | case RADEON_INFO_CURRENT_GPU_TEMP: |
576 | /* get temperature in millidegrees C */ |
577 | if (rdev->asic->pm.get_temperature) |
578 | *value = radeon_get_temperature(rdev); |
579 | else |
580 | *value = 0; |
581 | break; |
582 | case RADEON_INFO_CURRENT_GPU_SCLK: |
583 | /* get sclk in Mhz */ |
584 | if (rdev->pm.dpm_enabled) |
585 | *value = radeon_dpm_get_current_sclk(rdev) / 100; |
586 | else |
587 | *value = rdev->pm.current_sclk / 100; |
588 | break; |
589 | case RADEON_INFO_CURRENT_GPU_MCLK: |
590 | /* get mclk in Mhz */ |
591 | if (rdev->pm.dpm_enabled) |
592 | *value = radeon_dpm_get_current_mclk(rdev) / 100; |
593 | else |
594 | *value = rdev->pm.current_mclk / 100; |
595 | break; |
596 | case RADEON_INFO_READ_REG: |
597 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
598 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
599 | return -EFAULT; |
600 | } |
601 | if (radeon_get_allowed_info_register(rdev, *value, value)) |
602 | return -EINVAL; |
603 | break; |
604 | case RADEON_INFO_VA_UNMAP_WORKING: |
605 | *value = true; |
606 | break; |
607 | case RADEON_INFO_GPU_RESET_COUNTER: |
608 | *value = atomic_read(&rdev->gpu_reset_counter); |
609 | break; |
610 | default: |
611 | DRM_DEBUG_KMS("Invalid request %d\n" , info->request); |
612 | return -EINVAL; |
613 | } |
614 | if (copy_to_user(value_ptr, (char*)value, value_size)) { |
615 | DRM_ERROR("copy_to_user %s:%u\n" , __func__, __LINE__); |
616 | return -EFAULT; |
617 | } |
618 | return 0; |
619 | } |
620 | |
621 | |
622 | /* |
623 | * Outdated mess for old drm with Xorg being in charge (void function now). |
624 | */ |
625 | /** |
626 | * radeon_driver_lastclose_kms - drm callback for last close |
627 | * |
628 | * @dev: drm dev pointer |
629 | * |
630 | * Switch vga_switcheroo state after last close (all asics). |
631 | */ |
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	/* Restore the fbdev emulation state now that no DRM client is left. */
	drm_fb_helper_lastclose(dev);
	/* Let vga_switcheroo complete any GPU switch that was deferred until
	 * the device had no more users. */
	vga_switcheroo_process_delayed_switch();
}
637 | |
638 | /** |
639 | * radeon_driver_open_kms - drm callback for open |
640 | * |
641 | * @dev: drm dev pointer |
642 | * @file_priv: drm file |
643 | * |
644 | * On device open, init vm on cayman+ (all asics). |
645 | * Returns 0 on success, error on failure. |
646 | */ |
647 | int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) |
648 | { |
649 | struct radeon_device *rdev = dev->dev_private; |
650 | int r; |
651 | |
652 | file_priv->driver_priv = NULL; |
653 | |
654 | r = pm_runtime_get_sync(dev->dev); |
655 | if (r < 0) |
656 | return r; |
657 | |
658 | /* new gpu have virtual address space support */ |
659 | if (rdev->family >= CHIP_CAYMAN) { |
660 | struct radeon_fpriv *fpriv; |
661 | struct radeon_vm *vm; |
662 | |
663 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); |
664 | if (unlikely(!fpriv)) { |
665 | r = -ENOMEM; |
666 | goto out_suspend; |
667 | } |
668 | |
669 | if (rdev->accel_working) { |
670 | vm = &fpriv->vm; |
671 | r = radeon_vm_init(rdev, vm); |
672 | if (r) { |
673 | kfree(fpriv); |
674 | goto out_suspend; |
675 | } |
676 | |
677 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
678 | if (r) { |
679 | radeon_vm_fini(rdev, vm); |
680 | kfree(fpriv); |
681 | goto out_suspend; |
682 | } |
683 | |
684 | /* map the ib pool buffer read only into |
685 | * virtual address space */ |
686 | vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, |
687 | rdev->ring_tmp_bo.bo); |
688 | r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, |
689 | RADEON_VA_IB_OFFSET, |
690 | RADEON_VM_PAGE_READABLE | |
691 | RADEON_VM_PAGE_SNOOPED); |
692 | if (r) { |
693 | radeon_vm_fini(rdev, vm); |
694 | kfree(fpriv); |
695 | goto out_suspend; |
696 | } |
697 | } |
698 | file_priv->driver_priv = fpriv; |
699 | } |
700 | |
701 | out_suspend: |
702 | pm_runtime_mark_last_busy(dev->dev); |
703 | pm_runtime_put_autosuspend(dev->dev); |
704 | return r; |
705 | } |
706 | |
707 | /** |
708 | * radeon_driver_postclose_kms - drm callback for post close |
709 | * |
710 | * @dev: drm dev pointer |
711 | * @file_priv: drm file |
712 | * |
713 | * On device close, tear down hyperz and cmask filps on r1xx-r5xx |
714 | * (all asics). And tear down vm on cayman+ (all asics). |
715 | */ |
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Keep the GPU powered while per-file state is torn down. */
	pm_runtime_get_sync(dev->dev);

	/* Drop hyper-z / cmask ownership if this client held it. */
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	/* Release any UVD/VCE session handles the client left open. */
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			/* Unmap the IB pool buffer from this VM before the VM
			 * itself is destroyed. Best effort: skipped if the bo
			 * cannot be reserved. */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
755 | |
756 | /* |
757 | * VBlank related functions. |
758 | */ |
759 | /** |
760 | * radeon_get_vblank_counter_kms - get frame count |
761 | * |
762 | * @dev: drm dev pointer |
763 | * @pipe: crtc to get the frame count from |
764 | * |
765 | * Gets the frame count on the requested crtc (all asics). |
766 | * Returns frame count on success, -EINVAL on failure. |
767 | */ |
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n" , pipe);
		/* NOTE: -EINVAL is returned through a u32, so callers see a
		 * very large frame count rather than a negative errno. */
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			/* Re-read the counter until it is unchanged across the
			 * scanout-position query, i.e. both were sampled in
			 * the same frame. */
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			/* Could not get an exact position; fall through with
			 * the raw hw count. */
			DRM_DEBUG_VBL("Query failed! stat %d\n" , stat);
		}
		else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n" ,
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	}
	else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n" );
	}

	return count;
}
827 | |
828 | /** |
829 | * radeon_enable_vblank_kms - enable vblank interrupt |
830 | * |
831 | * @dev: drm dev pointer |
832 | * @crtc: crtc to enable vblank interrupt for |
833 | * |
834 | * Enable the interrupt on the requested crtc (all asics). |
835 | * Returns 0 on success, -EINVAL on failure. |
836 | */ |
837 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) |
838 | { |
839 | struct radeon_device *rdev = dev->dev_private; |
840 | unsigned long irqflags; |
841 | int r; |
842 | |
843 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
844 | DRM_ERROR("Invalid crtc %d\n" , crtc); |
845 | return -EINVAL; |
846 | } |
847 | |
848 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
849 | rdev->irq.crtc_vblank_int[crtc] = true; |
850 | r = radeon_irq_set(rdev); |
851 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
852 | return r; |
853 | } |
854 | |
855 | /** |
856 | * radeon_disable_vblank_kms - disable vblank interrupt |
857 | * |
858 | * @dev: drm dev pointer |
859 | * @crtc: crtc to disable vblank interrupt for |
860 | * |
861 | * Disable the interrupt on the requested crtc (all asics). |
862 | */ |
863 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) |
864 | { |
865 | struct radeon_device *rdev = dev->dev_private; |
866 | unsigned long irqflags; |
867 | |
868 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
869 | DRM_ERROR("Invalid crtc %d\n" , crtc); |
870 | return; |
871 | } |
872 | |
873 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
874 | rdev->irq.crtc_vblank_int[crtc] = false; |
875 | radeon_irq_set(rdev); |
876 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
877 | } |
878 | |
/* Ioctl dispatch table. The first group are the legacy pre-KMS (UMS/DRI1)
 * ioctls, all wired to drm_invalid_op so old userspace gets a clean error
 * instead of a missing-ioctl; the entries after the "KMS" marker are the
 * live GEM/CS/info interface. */
const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
/* Number of entries above; exported for the drm_driver num_ioctls field. */
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
925 | |