/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return amdgpu_uvd_entity_init(adev);
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
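	/*
	 * amdgpu_asic_set_uvd_clocks() takes VCLK/DCLK in units of 10 kHz,
	 * so this presumably requests a modest 100 MHz operating point for
	 * the ring and IB tests below.
	 */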
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

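	/*
	 * Program the semaphore timeout counters; 0xFFFFF appears to be the
	 * maximum wait count. The register names (wait fault, wait
	 * incomplete, signal incomplete) describe which timeout each write
	 * configures.
	 */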
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

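	/*
	 * Writing 3 to UVD_SEMA_CNTL sets the low two bits; bit 0 is
	 * presumably the semaphore enable (SEMAPHORE_EN in later UVD
	 * generations), the meaning of bit 1 follows from the field
	 * definitions in uvd_4_2_sh_mask.h.
	 */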
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	return 0;
}

static int uvd_v4_2_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shut down the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1 << 2, ~(1 << 2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
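	/*
	 * The magic value decodes against the UVD_LMI_CTRL fields in
	 * uvd_4_2_sh_mask.h: 0x8 looks like the write-clean timer count,
	 * bit 8 its enable, and the upper bits coherency/request-mode
	 * settings that later UVD generations program with named masks.
	 */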
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & ~0x10);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* take the LMI, UMC and VCPU out of soft reset */
	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the busy bit */
	WREG32_P(mmUVD_STATUS, 0, ~(1 << 2));

	/* force RBC into idle state */
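	/*
	 * 0x11010101 matches the fields later UVD generations set by name:
	 * RB_BUFSZ = 1, RB_BLKSZ = 1, RB_NO_FETCH, RB_NO_UPDATE and
	 * RB_RPTR_WR_EN, which parks the ring buffer controller.
	 */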
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
	       (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

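	/*
	 * Each loop below polls for up to 10 * 100 ms; the bits tested
	 * (0x2 in UVD_STATUS, 0xf and 0x240 in UVD_LMI_STATUS) are
	 * presumably the idle/drained indications the VCPU and memory
	 * interface report before it is safe to stall the UMC and assert
	 * reset.
	 */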
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

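	/*
	 * 0x3D49 is a raw register offset with no symbolic name in the 4.2
	 * headers; clearing bit 2 here appears to be part of the quiesce
	 * sequence before the VCPU clock is gated below.
	 */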
	WREG32_P(0x3D49, 0, ~(1 << 2));

	/* disable VCPU clock */
	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

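	/*
	 * First packet: seq into UVD_CONTEXT_ID, fence address into
	 * DATA0/DATA1, then GPCOM command 0, which presumably performs the
	 * fence write. The second packet issues command 2, presumably the
	 * trap that raises the interrupt serviced in
	 * uvd_v4_2_process_interrupt().
	 */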
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

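	/*
	 * Each NOP is a two-dword packet (PACKET0 header plus payload),
	 * hence the even-count requirement checked above.
	 */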
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

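	/*
	 * The offsets and sizes above are in 8-byte units (hence the >> 3);
	 * the extension registers below supply the high address bits that
	 * the cache offsets cannot carry.
	 */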
	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

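	/*
	 * The low 12 bits of UVD_CGC_MEM_CTRL presumably gate the UVD
	 * memory instances; DYN_CLOCK_MODE in UVD_CGC_CTRL switches the
	 * block to dynamic (medium-grain) clock gating.
	 */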
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
	       (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
	       (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

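	/*
	 * Bits 11-30 of UVD_CGC_CTRL appear to be per-sub-block gating
	 * selects: cleared in sw_mode together with enabling the dynamic
	 * clock-ramp divider in UVD_CGC_CTRL2, set otherwise with
	 * UVD_CGC_CTRL2 zeroed.
	 */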
	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
		       UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
		       (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
			      CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							    UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							    UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
			    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							    UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
							    UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.prepare_suspend = uvd_v4_2_prepare_suspend,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};
