/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"

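/**
 * amdgpu_unregister_gpu_instance - remove a device from the mgpu_info list
 *
 * @adev: amdgpu_device pointer
 *
 * Drops the device from the global mgpu_info table and updates the
 * APU/dGPU counts, all under the mgpu_info mutex.
 */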
static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

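/**
 * amdgpu_register_gpu_instance - add a device to the mgpu_info list
 *
 * @adev: amdgpu_device pointer
 *
 * Records the device in the global mgpu_info table and updates the
 * APU/dGPU counts, all under the mgpu_info mutex.
 */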
static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more GPU instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal errors, such as
	 * memory allocation, iomapping or memory manager initialization
	 * failures. It must properly initialize the GPU MC controller
	 * and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Calling ACPI methods requires modeset to be initialized,
	 * but failure here is not fatal.
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

	amdgpu_register_gpu_instance(adev);
out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

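/**
 * amdgpu_firmware_info - report firmware and feature versions
 *
 * @fw_info: structure filled in with the version information
 * @query_fw: userspace firmware query (type and index)
 * @adev: amdgpu_device pointer
 *
 * Looks up the firmware and feature version of the requested IP block.
 * Returns 0 on success, -EINVAL for an unknown firmware type or index.
 */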
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

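/**
 * amdgpu_hw_ip_info - fill in info about a hardware IP block
 *
 * @adev: amdgpu_device pointer
 * @info: userspace HW IP query
 * @result: structure filled in with ring count, IB alignment and version
 *
 * Counts the rings that are ready on the requested IP type and reports
 * the IP block version and IB alignment requirements.
 * Returns 0 on success, -EINVAL for an invalid type or instance.
 */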
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		if (adev->vcn.ring_dec.sched.ready)
			++num_rings;
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_enc_rings; i++)
			if (adev->vcn.ring_enc[i].sched.ready)
				++num_rings;
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = AMD_IP_BLOCK_TYPE_VCN;
		if (adev->vcn.ring_jpeg.sched.ready)
			++num_rings;
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size;
		gds_info.gds_total_size = adev->gds.mem.total_size;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

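/**
 * amdgpu_debugfs_firmware_info - debugfs dump of microcode versions
 *
 * @m: seq_file to print into
 * @data: unused
 *
 * Prints the feature and firmware version of every loaded microcode
 * block along with the VBIOS version.
 */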
static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}