1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include <linux/firmware.h> |
25 | #include <drm/drmP.h> |
26 | #include "amdgpu.h" |
27 | #include "amdgpu_vcn.h" |
28 | #include "soc15.h" |
29 | #include "soc15d.h" |
30 | #include "soc15_common.h" |
31 | |
32 | #include "vcn/vcn_1_0_offset.h" |
33 | #include "vcn/vcn_1_0_sh_mask.h" |
34 | #include "hdp/hdp_4_0_offset.h" |
35 | #include "mmhub/mmhub_9_1_offset.h" |
36 | #include "mmhub/mmhub_9_1_sh_mask.h" |
37 | |
38 | #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" |
39 | |
40 | #define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab |
41 | #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 |
42 | #define mmUVD_REG_XX_MASK 0x05ac |
43 | #define mmUVD_REG_XX_MASK_BASE_IDX 1 |
44 | |
45 | static int vcn_v1_0_stop(struct amdgpu_device *adev); |
46 | static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); |
47 | static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); |
48 | static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); |
49 | static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); |
50 | static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); |
51 | static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); |
52 | |
53 | /** |
54 | * vcn_v1_0_early_init - set function pointers |
55 | * |
56 | * @handle: amdgpu_device pointer |
57 | * |
58 | * Set ring and irq function pointers |
59 | */ |
60 | static int vcn_v1_0_early_init(void *handle) |
61 | { |
62 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
63 | |
64 | adev->vcn.num_enc_rings = 2; |
65 | |
66 | vcn_v1_0_set_dec_ring_funcs(adev); |
67 | vcn_v1_0_set_enc_ring_funcs(adev); |
68 | vcn_v1_0_set_jpeg_ring_funcs(adev); |
69 | vcn_v1_0_set_irq_funcs(adev); |
70 | |
71 | return 0; |
72 | } |
73 | |
74 | /** |
75 | * vcn_v1_0_sw_init - sw init for VCN block |
76 | * |
77 | * @handle: amdgpu_device pointer |
78 | * |
* Load firmware and do sw initialization
80 | */ |
81 | static int vcn_v1_0_sw_init(void *handle) |
82 | { |
83 | struct amdgpu_ring *ring; |
84 | int i, r; |
85 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
86 | |
87 | /* VCN DEC TRAP */ |
88 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq); |
89 | if (r) |
90 | return r; |
91 | |
92 | /* VCN ENC TRAP */ |
93 | for (i = 0; i < adev->vcn.num_enc_rings; ++i) { |
94 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE, |
95 | &adev->vcn.irq); |
96 | if (r) |
97 | return r; |
98 | } |
99 | |
100 | /* VCN JPEG TRAP */ |
101 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq); |
102 | if (r) |
103 | return r; |
104 | |
105 | r = amdgpu_vcn_sw_init(adev); |
106 | if (r) |
107 | return r; |
108 | |
109 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
110 | const struct common_firmware_header *hdr; |
111 | hdr = (const struct common_firmware_header *)adev->vcn.fw->data; |
112 | adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; |
113 | adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; |
114 | adev->firmware.fw_size += |
115 | ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); |
116 | DRM_INFO("PSP loading VCN firmware\n" ); |
117 | } |
118 | |
119 | r = amdgpu_vcn_resume(adev); |
120 | if (r) |
121 | return r; |
122 | |
123 | ring = &adev->vcn.ring_dec; |
124 | sprintf(ring->name, "vcn_dec" ); |
125 | r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); |
126 | if (r) |
127 | return r; |
128 | |
129 | for (i = 0; i < adev->vcn.num_enc_rings; ++i) { |
130 | ring = &adev->vcn.ring_enc[i]; |
131 | sprintf(ring->name, "vcn_enc%d" , i); |
132 | r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); |
133 | if (r) |
134 | return r; |
135 | } |
136 | |
137 | ring = &adev->vcn.ring_jpeg; |
138 | sprintf(ring->name, "vcn_jpeg" ); |
139 | r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); |
140 | if (r) |
141 | return r; |
142 | |
143 | return r; |
144 | } |
145 | |
146 | /** |
147 | * vcn_v1_0_sw_fini - sw fini for VCN block |
148 | * |
149 | * @handle: amdgpu_device pointer |
150 | * |
151 | * VCN suspend and free up sw allocation |
152 | */ |
153 | static int vcn_v1_0_sw_fini(void *handle) |
154 | { |
155 | int r; |
156 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
157 | |
158 | r = amdgpu_vcn_suspend(adev); |
159 | if (r) |
160 | return r; |
161 | |
162 | r = amdgpu_vcn_sw_fini(adev); |
163 | |
164 | return r; |
165 | } |
166 | |
167 | /** |
168 | * vcn_v1_0_hw_init - start and test VCN block |
169 | * |
170 | * @handle: amdgpu_device pointer |
171 | * |
172 | * Initialize the hardware, boot up the VCPU and do some testing |
173 | */ |
174 | static int vcn_v1_0_hw_init(void *handle) |
175 | { |
176 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
177 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; |
178 | int i, r; |
179 | |
180 | r = amdgpu_ring_test_helper(ring); |
181 | if (r) |
182 | goto done; |
183 | |
184 | for (i = 0; i < adev->vcn.num_enc_rings; ++i) { |
185 | ring = &adev->vcn.ring_enc[i]; |
186 | ring->sched.ready = true; |
187 | r = amdgpu_ring_test_helper(ring); |
188 | if (r) |
189 | goto done; |
190 | } |
191 | |
192 | ring = &adev->vcn.ring_jpeg; |
193 | r = amdgpu_ring_test_helper(ring); |
194 | if (r) |
195 | goto done; |
196 | |
197 | done: |
198 | if (!r) |
199 | DRM_INFO("VCN decode and encode initialized successfully(under %s).\n" , |
200 | (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode" :"SPG Mode" ); |
201 | |
202 | return r; |
203 | } |
204 | |
205 | /** |
206 | * vcn_v1_0_hw_fini - stop the hardware block |
207 | * |
208 | * @handle: amdgpu_device pointer |
209 | * |
210 | * Stop the VCN block, mark ring as not ready any more |
211 | */ |
212 | static int vcn_v1_0_hw_fini(void *handle) |
213 | { |
214 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
215 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; |
216 | |
217 | if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || |
218 | RREG32_SOC15(VCN, 0, mmUVD_STATUS)) |
219 | vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); |
220 | |
221 | ring->sched.ready = false; |
222 | |
223 | return 0; |
224 | } |
225 | |
226 | /** |
227 | * vcn_v1_0_suspend - suspend VCN block |
228 | * |
229 | * @handle: amdgpu_device pointer |
230 | * |
231 | * HW fini and suspend VCN block |
232 | */ |
233 | static int vcn_v1_0_suspend(void *handle) |
234 | { |
235 | int r; |
236 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
237 | |
238 | r = vcn_v1_0_hw_fini(adev); |
239 | if (r) |
240 | return r; |
241 | |
242 | r = amdgpu_vcn_suspend(adev); |
243 | |
244 | return r; |
245 | } |
246 | |
247 | /** |
248 | * vcn_v1_0_resume - resume VCN block |
249 | * |
250 | * @handle: amdgpu_device pointer |
251 | * |
252 | * Resume firmware and hw init VCN block |
253 | */ |
254 | static int vcn_v1_0_resume(void *handle) |
255 | { |
256 | int r; |
257 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
258 | |
259 | r = amdgpu_vcn_resume(adev); |
260 | if (r) |
261 | return r; |
262 | |
263 | r = vcn_v1_0_hw_init(adev); |
264 | |
265 | return r; |
266 | } |
267 | |
268 | /** |
269 | * vcn_v1_0_mc_resume_spg_mode - memory controller programming |
270 | * |
271 | * @adev: amdgpu_device pointer |
272 | * |
* Let the VCN memory controller know its offsets
274 | */ |
275 | static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) |
276 | { |
277 | uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); |
278 | uint32_t offset; |
279 | |
280 | /* cache window 0: fw */ |
281 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
282 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
283 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); |
284 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
285 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); |
286 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); |
287 | offset = 0; |
288 | } else { |
289 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
290 | lower_32_bits(adev->vcn.gpu_addr)); |
291 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
292 | upper_32_bits(adev->vcn.gpu_addr)); |
293 | offset = size; |
294 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, |
295 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3); |
296 | } |
297 | |
298 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); |
299 | |
300 | /* cache window 1: stack */ |
301 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, |
302 | lower_32_bits(adev->vcn.gpu_addr + offset)); |
303 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, |
304 | upper_32_bits(adev->vcn.gpu_addr + offset)); |
305 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); |
306 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); |
307 | |
308 | /* cache window 2: context */ |
309 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, |
310 | lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); |
311 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, |
312 | upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); |
313 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); |
314 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); |
315 | |
316 | WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, |
317 | adev->gfx.config.gb_addr_config); |
318 | WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, |
319 | adev->gfx.config.gb_addr_config); |
320 | WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, |
321 | adev->gfx.config.gb_addr_config); |
322 | WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, |
323 | adev->gfx.config.gb_addr_config); |
324 | WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, |
325 | adev->gfx.config.gb_addr_config); |
326 | WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, |
327 | adev->gfx.config.gb_addr_config); |
328 | WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, |
329 | adev->gfx.config.gb_addr_config); |
330 | WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, |
331 | adev->gfx.config.gb_addr_config); |
332 | WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, |
333 | adev->gfx.config.gb_addr_config); |
334 | WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, |
335 | adev->gfx.config.gb_addr_config); |
336 | WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, |
337 | adev->gfx.config.gb_addr_config); |
338 | WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, |
339 | adev->gfx.config.gb_addr_config); |
340 | } |
341 | |
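/**
* vcn_v1_0_mc_resume_dpg_mode - memory controller programming for DPG mode
*
* @adev: amdgpu_device pointer
*
* Program the same firmware, stack and context cache windows and tiling
* registers as the SPG variant, but through the DPG indirect write path.
*/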
342 | static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) |
343 | { |
344 | uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); |
345 | uint32_t offset; |
346 | |
347 | /* cache window 0: fw */ |
348 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
349 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
350 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), |
351 | 0xFFFFFFFF, 0); |
352 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
353 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), |
354 | 0xFFFFFFFF, 0); |
355 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0, |
356 | 0xFFFFFFFF, 0); |
357 | offset = 0; |
358 | } else { |
359 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
360 | lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0); |
361 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
362 | upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0); |
363 | offset = size; |
364 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, |
365 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0); |
366 | } |
367 | |
368 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0); |
369 | |
370 | /* cache window 1: stack */ |
371 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, |
372 | lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0); |
373 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, |
374 | upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0); |
375 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0, |
376 | 0xFFFFFFFF, 0); |
377 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE, |
378 | 0xFFFFFFFF, 0); |
379 | |
380 | /* cache window 2: context */ |
381 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, |
382 | lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), |
383 | 0xFFFFFFFF, 0); |
384 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, |
385 | upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), |
386 | 0xFFFFFFFF, 0); |
387 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0); |
388 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE, |
389 | 0xFFFFFFFF, 0); |
390 | |
391 | /* VCN global tiling registers */ |
392 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, |
393 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
394 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, |
395 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
396 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, |
397 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
398 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, |
399 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
400 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, |
401 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
402 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, |
403 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
404 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, |
405 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
406 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, |
407 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
408 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, |
409 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
410 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, |
411 | adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); |
412 | } |
413 | |
414 | /** |
415 | * vcn_v1_0_disable_clock_gating - disable VCN clock gating |
416 | * |
417 | * @adev: amdgpu_device pointer |
418 | * @sw: enable SW clock gating |
419 | * |
420 | * Disable clock gating for VCN block |
421 | */ |
422 | static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev) |
423 | { |
424 | uint32_t data; |
425 | |
426 | /* JPEG disable CGC */ |
427 | data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); |
428 | |
429 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
430 | data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
431 | else |
432 | data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; |
433 | |
434 | data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
435 | data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
436 | WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data); |
437 | |
438 | data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); |
439 | data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK); |
440 | WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data); |
441 | |
442 | /* UVD disable CGC */ |
443 | data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); |
444 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
445 | data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
446 | else |
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
448 | |
449 | data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
450 | data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
451 | WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); |
452 | |
453 | data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE); |
454 | data &= ~(UVD_CGC_GATE__SYS_MASK |
455 | | UVD_CGC_GATE__UDEC_MASK |
456 | | UVD_CGC_GATE__MPEG2_MASK |
457 | | UVD_CGC_GATE__REGS_MASK |
458 | | UVD_CGC_GATE__RBC_MASK |
459 | | UVD_CGC_GATE__LMI_MC_MASK |
460 | | UVD_CGC_GATE__LMI_UMC_MASK |
461 | | UVD_CGC_GATE__IDCT_MASK |
462 | | UVD_CGC_GATE__MPRD_MASK |
463 | | UVD_CGC_GATE__MPC_MASK |
464 | | UVD_CGC_GATE__LBSI_MASK |
465 | | UVD_CGC_GATE__LRBBM_MASK |
466 | | UVD_CGC_GATE__UDEC_RE_MASK |
467 | | UVD_CGC_GATE__UDEC_CM_MASK |
468 | | UVD_CGC_GATE__UDEC_IT_MASK |
469 | | UVD_CGC_GATE__UDEC_DB_MASK |
470 | | UVD_CGC_GATE__UDEC_MP_MASK |
471 | | UVD_CGC_GATE__WCB_MASK |
472 | | UVD_CGC_GATE__VCPU_MASK |
473 | | UVD_CGC_GATE__SCPU_MASK); |
474 | WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data); |
475 | |
476 | data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); |
477 | data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
478 | | UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
479 | | UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
480 | | UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
481 | | UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
482 | | UVD_CGC_CTRL__SYS_MODE_MASK |
483 | | UVD_CGC_CTRL__UDEC_MODE_MASK |
484 | | UVD_CGC_CTRL__MPEG2_MODE_MASK |
485 | | UVD_CGC_CTRL__REGS_MODE_MASK |
486 | | UVD_CGC_CTRL__RBC_MODE_MASK |
487 | | UVD_CGC_CTRL__LMI_MC_MODE_MASK |
488 | | UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
489 | | UVD_CGC_CTRL__IDCT_MODE_MASK |
490 | | UVD_CGC_CTRL__MPRD_MODE_MASK |
491 | | UVD_CGC_CTRL__MPC_MODE_MASK |
492 | | UVD_CGC_CTRL__LBSI_MODE_MASK |
493 | | UVD_CGC_CTRL__LRBBM_MODE_MASK |
494 | | UVD_CGC_CTRL__WCB_MODE_MASK |
495 | | UVD_CGC_CTRL__VCPU_MODE_MASK |
496 | | UVD_CGC_CTRL__SCPU_MODE_MASK); |
497 | WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); |
498 | |
/* turn on SUVD clock gating */
500 | data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE); |
501 | data |= (UVD_SUVD_CGC_GATE__SRE_MASK |
502 | | UVD_SUVD_CGC_GATE__SIT_MASK |
503 | | UVD_SUVD_CGC_GATE__SMP_MASK |
504 | | UVD_SUVD_CGC_GATE__SCM_MASK |
505 | | UVD_SUVD_CGC_GATE__SDB_MASK |
506 | | UVD_SUVD_CGC_GATE__SRE_H264_MASK |
507 | | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
508 | | UVD_SUVD_CGC_GATE__SIT_H264_MASK |
509 | | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
510 | | UVD_SUVD_CGC_GATE__SCM_H264_MASK |
511 | | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
512 | | UVD_SUVD_CGC_GATE__SDB_H264_MASK |
513 | | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK |
514 | | UVD_SUVD_CGC_GATE__SCLR_MASK |
515 | | UVD_SUVD_CGC_GATE__UVD_SC_MASK |
516 | | UVD_SUVD_CGC_GATE__ENT_MASK |
517 | | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK |
518 | | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK |
519 | | UVD_SUVD_CGC_GATE__SITE_MASK |
520 | | UVD_SUVD_CGC_GATE__SRE_VP9_MASK |
521 | | UVD_SUVD_CGC_GATE__SCM_VP9_MASK |
522 | | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK |
523 | | UVD_SUVD_CGC_GATE__SDB_VP9_MASK |
524 | | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); |
525 | WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data); |
526 | |
527 | data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); |
528 | data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
529 | | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
530 | | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
531 | | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
532 | | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK |
533 | | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK |
534 | | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK |
535 | | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK |
536 | | UVD_SUVD_CGC_CTRL__IME_MODE_MASK |
537 | | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); |
538 | WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); |
539 | } |
540 | |
541 | /** |
542 | * vcn_v1_0_enable_clock_gating - enable VCN clock gating |
543 | * |
544 | * @adev: amdgpu_device pointer |
545 | * @sw: enable SW clock gating |
546 | * |
547 | * Enable clock gating for VCN block |
548 | */ |
549 | static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev) |
550 | { |
551 | uint32_t data = 0; |
552 | |
553 | /* enable JPEG CGC */ |
554 | data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); |
555 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
556 | data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
557 | else |
558 | data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
559 | data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
560 | data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
561 | WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data); |
562 | |
563 | data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); |
564 | data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK); |
565 | WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data); |
566 | |
567 | /* enable UVD CGC */ |
568 | data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); |
569 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
570 | data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
571 | else |
572 | data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
573 | data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
574 | data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
575 | WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); |
576 | |
577 | data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); |
578 | data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
579 | | UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
580 | | UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
581 | | UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
582 | | UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
583 | | UVD_CGC_CTRL__SYS_MODE_MASK |
584 | | UVD_CGC_CTRL__UDEC_MODE_MASK |
585 | | UVD_CGC_CTRL__MPEG2_MODE_MASK |
586 | | UVD_CGC_CTRL__REGS_MODE_MASK |
587 | | UVD_CGC_CTRL__RBC_MODE_MASK |
588 | | UVD_CGC_CTRL__LMI_MC_MODE_MASK |
589 | | UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
590 | | UVD_CGC_CTRL__IDCT_MODE_MASK |
591 | | UVD_CGC_CTRL__MPRD_MODE_MASK |
592 | | UVD_CGC_CTRL__MPC_MODE_MASK |
593 | | UVD_CGC_CTRL__LBSI_MODE_MASK |
594 | | UVD_CGC_CTRL__LRBBM_MODE_MASK |
595 | | UVD_CGC_CTRL__WCB_MODE_MASK |
596 | | UVD_CGC_CTRL__VCPU_MODE_MASK |
597 | | UVD_CGC_CTRL__SCPU_MODE_MASK); |
598 | WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); |
599 | |
600 | data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); |
601 | data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
602 | | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
603 | | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
604 | | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
605 | | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK |
606 | | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK |
607 | | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK |
608 | | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK |
609 | | UVD_SUVD_CGC_CTRL__IME_MODE_MASK |
610 | | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); |
611 | WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); |
612 | } |
613 | |
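/**
* vcn_v1_0_clock_gating_dpg_mode - program clock gating in DPG mode
*
* @adev: amdgpu_device pointer
* @sram_sel: SRAM select forwarded to the DPG indirect register writes
*
* Program JPEG, UVD and SUVD clock gating through the DPG write path.
*/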
614 | static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel) |
615 | { |
616 | uint32_t reg_data = 0; |
617 | |
618 | /* disable JPEG CGC */ |
619 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
620 | reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
621 | else |
622 | reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
623 | reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
624 | reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
625 | WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); |
626 | |
627 | WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); |
628 | |
629 | /* enable sw clock gating control */ |
630 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
631 | reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
632 | else |
633 | reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
634 | reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
635 | reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
636 | reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | |
637 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK | |
638 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK | |
639 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK | |
640 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK | |
641 | UVD_CGC_CTRL__SYS_MODE_MASK | |
642 | UVD_CGC_CTRL__UDEC_MODE_MASK | |
643 | UVD_CGC_CTRL__MPEG2_MODE_MASK | |
644 | UVD_CGC_CTRL__REGS_MODE_MASK | |
645 | UVD_CGC_CTRL__RBC_MODE_MASK | |
646 | UVD_CGC_CTRL__LMI_MC_MODE_MASK | |
647 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK | |
648 | UVD_CGC_CTRL__IDCT_MODE_MASK | |
649 | UVD_CGC_CTRL__MPRD_MODE_MASK | |
650 | UVD_CGC_CTRL__MPC_MODE_MASK | |
651 | UVD_CGC_CTRL__LBSI_MODE_MASK | |
652 | UVD_CGC_CTRL__LRBBM_MODE_MASK | |
653 | UVD_CGC_CTRL__WCB_MODE_MASK | |
654 | UVD_CGC_CTRL__VCPU_MODE_MASK | |
655 | UVD_CGC_CTRL__SCPU_MODE_MASK); |
656 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); |
657 | |
658 | /* turn off clock gating */ |
659 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); |
660 | |
661 | /* turn on SUVD clock gating */ |
662 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel); |
663 | |
664 | /* turn on sw mode in UVD_SUVD_CGC_CTRL */ |
665 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel); |
666 | } |
667 | |
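/**
* vcn_1_0_disable_static_power_gating - power the VCN tiles up
*
* @adev: amdgpu_device pointer
*
* Request power-up of the UVD tiles through the PGFSM, wait for the status
* register to confirm it, then clear the power-gating status bits.
*/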
668 | static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) |
669 | { |
670 | uint32_t data = 0; |
671 | int ret; |
672 | |
673 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { |
674 | data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT |
675 | | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT |
676 | | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT |
677 | | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT |
678 | | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT |
679 | | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT |
680 | | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT |
681 | | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT |
682 | | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT |
683 | | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT |
684 | | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); |
685 | |
686 | WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); |
687 | SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret); |
688 | } else { |
689 | data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT |
690 | | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT |
691 | | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT |
692 | | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT |
693 | | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT |
694 | | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT |
695 | | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT |
696 | | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT |
697 | | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT |
698 | | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT |
699 | | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); |
700 | WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); |
701 | SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret); |
702 | } |
703 | |
/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */
705 | |
706 | data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); |
707 | data &= ~0x103; |
708 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN) |
709 | data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK; |
710 | |
711 | WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data); |
712 | } |
713 | |
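/**
* vcn_1_0_enable_static_power_gating - power the VCN tiles down
*
* @adev: amdgpu_device pointer
*
* Mark the tiles as powered off, request power-down through the PGFSM and
* wait for the status register to confirm it.
*/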
714 | static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) |
715 | { |
716 | uint32_t data = 0; |
717 | int ret; |
718 | |
719 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { |
720 | /* Before power off, this indicator has to be turned on */ |
721 | data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); |
722 | data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; |
723 | data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; |
724 | WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data); |
725 | |
726 | |
727 | data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT |
728 | | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT |
729 | | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT |
730 | | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT |
731 | | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT |
732 | | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT |
733 | | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT |
734 | | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT |
735 | | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT |
736 | | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT |
737 | | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); |
738 | |
739 | WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); |
740 | |
741 | data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT |
742 | | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT |
743 | | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT |
744 | | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT |
745 | | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT |
746 | | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT |
747 | | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT |
748 | | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT |
749 | | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT |
750 | | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT |
751 | | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT); |
752 | SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret); |
753 | } |
754 | } |
755 | |
756 | /** |
757 | * vcn_v1_0_start - start VCN block |
758 | * |
759 | * @adev: amdgpu_device pointer |
760 | * |
761 | * Setup and start the VCN block |
762 | */ |
763 | static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) |
764 | { |
765 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; |
766 | uint32_t rb_bufsz, tmp; |
767 | uint32_t lmi_swap_cntl; |
768 | int i, j, r; |
769 | |
770 | /* disable byte swapping */ |
771 | lmi_swap_cntl = 0; |
772 | |
773 | vcn_1_0_disable_static_power_gating(adev); |
774 | |
775 | tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; |
776 | WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); |
777 | |
778 | /* disable clock gating */ |
779 | vcn_v1_0_disable_clock_gating(adev); |
780 | |
/* disable interrupt */
782 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, |
783 | ~UVD_MASTINT_EN__VCPU_EN_MASK); |
784 | |
785 | /* initialize VCN memory controller */ |
786 | tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL); |
787 | WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp | |
788 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
789 | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | |
790 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
791 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); |
792 | |
793 | #ifdef __BIG_ENDIAN |
794 | /* swap (8 in 32) RB and IB */ |
795 | lmi_swap_cntl = 0xa; |
796 | #endif |
797 | WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); |
798 | |
799 | tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL); |
800 | tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; |
801 | tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; |
802 | WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp); |
803 | |
804 | WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, |
805 | ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | |
806 | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | |
807 | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | |
808 | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); |
809 | |
810 | WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, |
811 | ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | |
812 | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | |
813 | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | |
814 | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); |
815 | |
816 | WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, |
817 | ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | |
818 | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | |
819 | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); |
820 | |
821 | vcn_v1_0_mc_resume_spg_mode(adev); |
822 | |
823 | WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10); |
824 | WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, |
825 | RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3); |
826 | |
827 | /* enable VCPU clock */ |
828 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); |
829 | |
830 | /* boot up the VCPU */ |
831 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, |
832 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
833 | |
834 | /* enable UMC */ |
835 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, |
836 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
837 | |
838 | tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET); |
839 | tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; |
840 | tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; |
841 | WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp); |
842 | |
843 | for (i = 0; i < 10; ++i) { |
844 | uint32_t status; |
845 | |
846 | for (j = 0; j < 100; ++j) { |
847 | status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); |
848 | if (status & UVD_STATUS__IDLE) |
849 | break; |
850 | mdelay(10); |
851 | } |
852 | r = 0; |
853 | if (status & UVD_STATUS__IDLE) |
854 | break; |
855 | |
856 | DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n" ); |
857 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), |
858 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, |
859 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
860 | mdelay(10); |
861 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, |
862 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
863 | mdelay(10); |
864 | r = -1; |
865 | } |
866 | |
867 | if (r) { |
868 | DRM_ERROR("VCN decode not responding, giving up!!!\n" ); |
869 | return r; |
870 | } |
871 | /* enable master interrupt */ |
872 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), |
873 | UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK); |
874 | |
/* enable system interrupt for JRBC, TODO: move to set interrupt */
876 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN), |
877 | UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, |
878 | ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK); |
879 | |
880 | /* clear the busy bit of UVD_STATUS */ |
881 | tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY; |
882 | WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); |
883 | |
884 | /* force RBC into idle state */ |
885 | rb_bufsz = order_base_2(ring->ring_size); |
886 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); |
887 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
888 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
889 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); |
890 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); |
891 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); |
892 | |
893 | /* set the write pointer delay */ |
894 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); |
895 | |
896 | /* set the wb address */ |
897 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, |
898 | (upper_32_bits(ring->gpu_addr) >> 2)); |
899 | |
/* program the RB_BASE for ring buffer */
901 | WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, |
902 | lower_32_bits(ring->gpu_addr)); |
903 | WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, |
904 | upper_32_bits(ring->gpu_addr)); |
905 | |
906 | /* Initialize the ring buffer's read and write pointers */ |
907 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); |
908 | |
909 | WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0); |
910 | |
911 | ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); |
912 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, |
913 | lower_32_bits(ring->wptr)); |
914 | |
915 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, |
916 | ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); |
917 | |
918 | ring = &adev->vcn.ring_enc[0]; |
919 | WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
920 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
921 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); |
922 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); |
923 | WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); |
924 | |
925 | ring = &adev->vcn.ring_enc[1]; |
926 | WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
927 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
928 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); |
929 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); |
930 | WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); |
931 | |
932 | ring = &adev->vcn.ring_jpeg; |
933 | WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); |
934 | WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | |
935 | UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); |
936 | WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); |
937 | WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); |
938 | WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); |
939 | WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); |
940 | WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); |
941 | |
942 | /* initialize wptr */ |
943 | ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); |
944 | |
945 | /* copy patch commands to the jpeg ring */ |
946 | vcn_v1_0_jpeg_ring_set_patch_ring(ring, |
947 | (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); |
948 | |
949 | return 0; |
950 | } |
951 | |
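/**
* vcn_v1_0_start_dpg_mode - start VCN block in DPG mode
*
* @adev: amdgpu_device pointer
*
* Setup and start the VCN block in dynamic power-gating mode, programming
* most registers through the DPG indirect write path.
*/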
952 | static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) |
953 | { |
954 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; |
955 | uint32_t rb_bufsz, tmp; |
956 | uint32_t lmi_swap_cntl; |
957 | |
958 | /* disable byte swapping */ |
959 | lmi_swap_cntl = 0; |
960 | |
961 | vcn_1_0_enable_static_power_gating(adev); |
962 | |
963 | /* enable dynamic power gating mode */ |
964 | tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS); |
965 | tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; |
966 | tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; |
967 | WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp); |
968 | |
969 | /* enable clock gating */ |
970 | vcn_v1_0_clock_gating_dpg_mode(adev, 0); |
971 | |
972 | /* enable VCPU clock */ |
973 | tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); |
974 | tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; |
975 | tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK; |
976 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0); |
977 | |
/* disable interrupt */
979 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN, |
980 | 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0); |
981 | |
982 | /* initialize VCN memory controller */ |
983 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL, |
984 | (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | |
985 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
986 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
987 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | |
988 | UVD_LMI_CTRL__REQ_MODE_MASK | |
989 | UVD_LMI_CTRL__CRC_RESET_MASK | |
990 | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | |
991 | 0x00100000L, 0xFFFFFFFF, 0); |
992 | |
993 | #ifdef __BIG_ENDIAN |
994 | /* swap (8 in 32) RB and IB */ |
995 | lmi_swap_cntl = 0xa; |
996 | #endif |
997 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0); |
998 | |
999 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL, |
1000 | 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0); |
1001 | |
1002 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, |
1003 | ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | |
1004 | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | |
1005 | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | |
1006 | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0); |
1007 | |
1008 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, |
1009 | ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | |
1010 | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | |
1011 | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | |
1012 | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0); |
1013 | |
1014 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, |
1015 | ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | |
1016 | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | |
1017 | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0); |
1018 | |
1019 | vcn_v1_0_mc_resume_dpg_mode(adev); |
1020 | |
1021 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0); |
1022 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0); |
1023 | |
1024 | /* boot up the VCPU */ |
1025 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0); |
1026 | |
1027 | /* enable UMC */ |
1028 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2, |
1029 | 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, |
1030 | 0xFFFFFFFF, 0); |
1031 | |
1032 | /* enable master interrupt */ |
1033 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN, |
1034 | UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0); |
1035 | |
1036 | vcn_v1_0_clock_gating_dpg_mode(adev, 1); |
1037 | /* setup mmUVD_LMI_CTRL */ |
1038 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL, |
1039 | (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | |
1040 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
1041 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
1042 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | |
1043 | UVD_LMI_CTRL__REQ_MODE_MASK | |
1044 | UVD_LMI_CTRL__CRC_RESET_MASK | |
1045 | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | |
1046 | 0x00100000L, 0xFFFFFFFF, 1); |
1047 | |
1048 | tmp = adev->gfx.config.gb_addr_config; |
1049 | /* setup VCN global tiling registers */ |
1050 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); |
1051 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); |
1052 | |
1053 | /* enable System Interrupt for JRBC */ |
1054 | WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN, |
1055 | UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1); |
1056 | |
1057 | /* force RBC into idle state */ |
1058 | rb_bufsz = order_base_2(ring->ring_size); |
1059 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); |
1060 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
1061 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
1062 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); |
1063 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); |
1064 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); |
1065 | |
1066 | /* set the write pointer delay */ |
1067 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); |
1068 | |
1069 | /* set the wb address */ |
1070 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, |
1071 | (upper_32_bits(ring->gpu_addr) >> 2)); |
1072 | |
/* program the RB_BASE for ring buffer */
1074 | WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, |
1075 | lower_32_bits(ring->gpu_addr)); |
1076 | WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, |
1077 | upper_32_bits(ring->gpu_addr)); |
1078 | |
1079 | /* Initialize the ring buffer's read and write pointers */ |
1080 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); |
1081 | |
1082 | WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0); |
1083 | |
1084 | ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); |
1085 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, |
1086 | lower_32_bits(ring->wptr)); |
1087 | |
1088 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, |
1089 | ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); |
1090 | |
1091 | /* initialize JPEG wptr */ |
1092 | ring = &adev->vcn.ring_jpeg; |
1093 | ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); |
1094 | |
1095 | /* copy patch commands to the jpeg ring */ |
1096 | vcn_v1_0_jpeg_ring_set_patch_ring(ring, |
1097 | (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); |
1098 | |
1099 | return 0; |
1100 | } |
1101 | |
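/**
* vcn_v1_0_start - start VCN block
*
* @adev: amdgpu_device pointer
*
* Start the VCN block in DPG or SPG mode depending on the power-gating flags
*/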
1102 | static int vcn_v1_0_start(struct amdgpu_device *adev) |
1103 | { |
1104 | int r; |
1105 | |
1106 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) |
1107 | r = vcn_v1_0_start_dpg_mode(adev); |
1108 | else |
1109 | r = vcn_v1_0_start_spg_mode(adev); |
1110 | return r; |
1111 | } |
1112 | |
1113 | /** |
1114 | * vcn_v1_0_stop - stop VCN block |
1115 | * |
1116 | * @adev: amdgpu_device pointer |
1117 | * |
1118 | * stop the VCN block |
1119 | */ |
1120 | static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) |
1121 | { |
1122 | int ret_code, tmp; |
1123 | |
1124 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code); |
1125 | |
1126 | tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | |
1127 | UVD_LMI_STATUS__READ_CLEAN_MASK | |
1128 | UVD_LMI_STATUS__WRITE_CLEAN_MASK | |
1129 | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; |
1130 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code); |
1131 | |
1132 | /* put VCPU into reset */ |
1133 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), |
1134 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, |
1135 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
1136 | |
1137 | tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | |
1138 | UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; |
1139 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code); |
1140 | |
1141 | /* disable VCPU clock */ |
1142 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, |
1143 | ~UVD_VCPU_CNTL__CLK_EN_MASK); |
1144 | |
1145 | /* reset LMI UMC/LMI */ |
1146 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), |
1147 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK, |
1148 | ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); |
1149 | |
1150 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), |
1151 | UVD_SOFT_RESET__LMI_SOFT_RESET_MASK, |
1152 | ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); |
1153 | |
1154 | WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0); |
1155 | |
1156 | vcn_v1_0_enable_clock_gating(adev); |
1157 | vcn_1_0_enable_static_power_gating(adev); |
1158 | return 0; |
1159 | } |
1160 | |
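/**
* vcn_v1_0_stop_dpg_mode - stop VCN block in DPG mode
*
* @adev: amdgpu_device pointer
*
* Wait for the rings to drain and the power status to settle, then disable
* dynamic power-gating mode.
*/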
1161 | static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) |
1162 | { |
1163 | int ret_code = 0; |
1164 | uint32_t tmp; |
1165 | |
1166 | /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ |
1167 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, |
1168 | UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, |
1169 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); |
1170 | |
1171 | /* wait for read ptr to be equal to write ptr */ |
1172 | tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); |
1173 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); |
1174 | |
1175 | tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); |
1176 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); |
1177 | |
1178 | tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); |
1179 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); |
1180 | |
1181 | tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; |
1182 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); |
1183 | |
1184 | SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, |
1185 | UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, |
1186 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); |
1187 | |
1188 | /* disable dynamic power gating mode */ |
1189 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, |
1190 | ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); |
1191 | |
1192 | return 0; |
1193 | } |
1194 | |
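/**
* vcn_v1_0_stop - stop VCN block
*
* @adev: amdgpu_device pointer
*
* Stop the VCN block in DPG or SPG mode depending on the power-gating flags
*/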
1195 | static int vcn_v1_0_stop(struct amdgpu_device *adev) |
1196 | { |
1197 | int r; |
1198 | |
1199 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) |
1200 | r = vcn_v1_0_stop_dpg_mode(adev); |
1201 | else |
1202 | r = vcn_v1_0_stop_spg_mode(adev); |
1203 | |
1204 | return r; |
1205 | } |
1206 | |
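/**
* vcn_v1_0_is_idle - check VCN block idle status
*
* @handle: amdgpu_device pointer
*
* Check whether UVD_STATUS reports the block as idle
*/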
1207 | static bool vcn_v1_0_is_idle(void *handle) |
1208 | { |
1209 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1210 | |
1211 | return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); |
1212 | } |
1213 | |
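/**
* vcn_v1_0_wait_for_idle - wait for VCN block to become idle
*
* @handle: amdgpu_device pointer
*
* Poll UVD_STATUS until the block reports idle
*/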
1214 | static int vcn_v1_0_wait_for_idle(void *handle) |
1215 | { |
1216 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1217 | int ret = 0; |
1218 | |
1219 | SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, |
1220 | UVD_STATUS__IDLE, ret); |
1221 | |
1222 | return ret; |
1223 | } |
1224 | |
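/**
* vcn_v1_0_set_clockgating_state - set VCN clock gating state
*
* @handle: amdgpu_device pointer
* @state: clock gating state requested
*
* Enable or disable VCN clock gating; gating is only enabled once the
* block is idle.
*/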
1225 | static int vcn_v1_0_set_clockgating_state(void *handle, |
1226 | enum amd_clockgating_state state) |
1227 | { |
1228 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
bool enable = (state == AMD_CG_STATE_GATE);
1230 | |
1231 | if (enable) { |
1232 | /* wait for STATUS to clear */ |
if (!vcn_v1_0_is_idle(handle))
1234 | return -EBUSY; |
1235 | vcn_v1_0_enable_clock_gating(adev); |
1236 | } else { |
/* disable HW gating and enable SW gating */
1238 | vcn_v1_0_disable_clock_gating(adev); |
1239 | } |
1240 | return 0; |
1241 | } |
1242 | |
1243 | /** |
1244 | * vcn_v1_0_dec_ring_get_rptr - get read pointer |
1245 | * |
1246 | * @ring: amdgpu_ring pointer |
1247 | * |
1248 | * Returns the current hardware read pointer |
1249 | */ |
1250 | static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring) |
1251 | { |
1252 | struct amdgpu_device *adev = ring->adev; |
1253 | |
1254 | return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); |
1255 | } |
1256 | |
1257 | /** |
1258 | * vcn_v1_0_dec_ring_get_wptr - get write pointer |
1259 | * |
1260 | * @ring: amdgpu_ring pointer |
1261 | * |
1262 | * Returns the current hardware write pointer |
1263 | */ |
1264 | static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring) |
1265 | { |
1266 | struct amdgpu_device *adev = ring->adev; |
1267 | |
1268 | return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); |
1269 | } |
1270 | |
1271 | /** |
1272 | * vcn_v1_0_dec_ring_set_wptr - set write pointer |
1273 | * |
1274 | * @ring: amdgpu_ring pointer |
1275 | * |
1276 | * Commits the write pointer to the hardware |
1277 | */ |
1278 | static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring) |
1279 | { |
1280 | struct amdgpu_device *adev = ring->adev; |
1281 | |
1282 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) |
1283 | WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, |
1284 | lower_32_bits(ring->wptr) | 0x80000000); |
1285 | |
1286 | WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
1287 | } |
1288 | |
1289 | /** |
1290 | * vcn_v1_0_dec_ring_insert_start - insert a start command |
1291 | * |
1292 | * @ring: amdgpu_ring pointer |
1293 | * |
1294 | * Write a start command to the ring. |
1295 | */ |
1296 | static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring) |
1297 | { |
1298 | struct amdgpu_device *adev = ring->adev; |
1299 | |
1300 | amdgpu_ring_write(ring, |
1301 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); |
1302 | amdgpu_ring_write(ring, 0); |
1303 | amdgpu_ring_write(ring, |
1304 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); |
1305 | amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); |
1306 | } |
1307 | |
1308 | /** |
1309 | * vcn_v1_0_dec_ring_insert_end - insert a end command |
1310 | * |
1311 | * @ring: amdgpu_ring pointer |
1312 | * |
1313 | * Write a end command to the ring. |
1314 | */ |
1315 | static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring) |
1316 | { |
1317 | struct amdgpu_device *adev = ring->adev; |
1318 | |
1319 | amdgpu_ring_write(ring, |
1320 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); |
1321 | amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); |
1322 | } |
1323 | |
1324 | /** |
1325 | * vcn_v1_0_dec_ring_emit_fence - emit an fence & trap command |
1326 | * |
1327 | * @ring: amdgpu_ring pointer |
1328 | * @fence: fence to emit |
1329 | * |
1330 | * Write a fence and a trap command to the ring. |
1331 | */ |
1332 | static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
1333 | unsigned flags) |
1334 | { |
1335 | struct amdgpu_device *adev = ring->adev; |
1336 | |
1337 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); |
1338 | |
1339 | amdgpu_ring_write(ring, |
1340 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); |
1341 | amdgpu_ring_write(ring, seq); |
1342 | amdgpu_ring_write(ring, |
1343 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); |
1344 | amdgpu_ring_write(ring, addr & 0xffffffff); |
1345 | amdgpu_ring_write(ring, |
1346 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); |
1347 | amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); |
1348 | amdgpu_ring_write(ring, |
1349 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); |
1350 | amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); |
1351 | |
1352 | amdgpu_ring_write(ring, |
1353 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); |
1354 | amdgpu_ring_write(ring, 0); |
1355 | amdgpu_ring_write(ring, |
1356 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); |
1357 | amdgpu_ring_write(ring, 0); |
1358 | amdgpu_ring_write(ring, |
1359 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); |
1360 | amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); |
1361 | } |
1362 | |
1363 | /** |
1364 | * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer |
1365 | * |
1366 | * @ring: amdgpu_ring pointer |
1367 | * @ib: indirect buffer to execute |
1368 | * |
1369 | * Write ring commands to execute the indirect buffer |
1370 | */ |
1371 | static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, |
1372 | struct amdgpu_job *job, |
1373 | struct amdgpu_ib *ib, |
1374 | uint32_t flags) |
1375 | { |
1376 | struct amdgpu_device *adev = ring->adev; |
1377 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
1378 | |
1379 | amdgpu_ring_write(ring, |
1380 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); |
1381 | amdgpu_ring_write(ring, vmid); |
1382 | |
1383 | amdgpu_ring_write(ring, |
1384 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); |
1385 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
1386 | amdgpu_ring_write(ring, |
1387 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); |
1388 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); |
1389 | amdgpu_ring_write(ring, |
1390 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0)); |
1391 | amdgpu_ring_write(ring, ib->length_dw); |
1392 | } |
1393 | |
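/**
* vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait command
*
* @ring: amdgpu_ring pointer
* @reg: register offset to poll
* @val: value to wait for
* @mask: mask to apply to the register value
*
* Emit packets that make the decode firmware poll a register until the
* masked value matches val.
*/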
1394 | static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, |
1395 | uint32_t reg, uint32_t val, |
1396 | uint32_t mask) |
1397 | { |
1398 | struct amdgpu_device *adev = ring->adev; |
1399 | |
1400 | amdgpu_ring_write(ring, |
1401 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); |
1402 | amdgpu_ring_write(ring, reg << 2); |
1403 | amdgpu_ring_write(ring, |
1404 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); |
1405 | amdgpu_ring_write(ring, val); |
1406 | amdgpu_ring_write(ring, |
1407 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); |
1408 | amdgpu_ring_write(ring, mask); |
1409 | amdgpu_ring_write(ring, |
1410 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); |
1411 | amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); |
1412 | } |
1413 | |
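/**
* vcn_v1_0_dec_ring_emit_vm_flush - emit a VM TLB flush
*
* @ring: amdgpu_ring pointer
* @vmid: vmid to flush for
* @pd_addr: page directory address
*
* Emit the GMC TLB flush and then wait for the page table base register
* write to become visible.
*/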
1414 | static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1415 | unsigned vmid, uint64_t pd_addr) |
1416 | { |
1417 | struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; |
1418 | uint32_t data0, data1, mask; |
1419 | |
1420 | pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
1421 | |
1422 | /* wait for register write */ |
1423 | data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; |
1424 | data1 = lower_32_bits(pd_addr); |
1425 | mask = 0xffffffff; |
1426 | vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); |
1427 | } |
1428 | |
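/**
* vcn_v1_0_dec_ring_emit_wreg - emit a register write command
*
* @ring: amdgpu_ring pointer
* @reg: register offset to write
* @val: value to write
*
* Emit packets that make the decode firmware write val into reg.
*/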
1429 | static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, |
1430 | uint32_t reg, uint32_t val) |
1431 | { |
1432 | struct amdgpu_device *adev = ring->adev; |
1433 | |
1434 | amdgpu_ring_write(ring, |
1435 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); |
1436 | amdgpu_ring_write(ring, reg << 2); |
1437 | amdgpu_ring_write(ring, |
1438 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); |
1439 | amdgpu_ring_write(ring, val); |
1440 | amdgpu_ring_write(ring, |
1441 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); |
1442 | amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1); |
1443 | } |
1444 | |
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

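	/*
	 * Fence plus trap is five dwords: the FENCE command, the 64-bit
	 * address to write to, the sequence number and a TRAP command so
	 * that the engine raises an interrupt once the write is done.
	 */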
	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

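	/* a fixed five-dword packet: IB command, vmid, address low/high, size */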
	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
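	/*
	 * REG_WAIT is followed by the register byte address, the compare
	 * mask and the expected value.
	 */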
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

/**
 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

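	/*
	 * Write the start marker through the JRBC indirect window:
	 * EXTERNAL_REG_BASE selects byte offset 0x68e04 and the following
	 * TYPE0 packet writes 0x80010000 there; insert_end presumably
	 * clears the top bit again with 0x00010000 (the register behind
	 * 0x68e04 is not named in the public headers).
	 */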
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}

/**
 * vcn_v1_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}

/**
 * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

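	/*
	 * The fence value travels two ways: through the GPCOM mailbox
	 * together with the fence command (0x8), and as a memory write
	 * of @seq to @addr via the JRBC write BAR.  The packet tail then
	 * polls @addr until the value has landed and finally raises the
	 * fence interrupt with a TYPE7 trap.
	 */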
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0xffffffff);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	/* emit trap */
	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

/**
 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
				       struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

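	/*
	 * Bind the vmid to both the JRBC IB fetcher and the JPEG engine,
	 * program the IB address and size, then re-point the JRBC read
	 * BAR at the ring and poll JRBC_STATUS against the reference
	 * value 0x2, apparently so the IB completes before further
	 * packets are consumed.
	 */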
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}

static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					     uint32_t reg, uint32_t val,
					     uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

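	/*
	 * Registers inside the windows 0x1e000-0x1e1ff and 0x1f800-0x21fff
	 * (byte offsets) can be encoded directly in the packet header;
	 * everything else is reached indirectly through EXTERNAL_REG_BASE.
	 */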
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					 uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

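	/* JPEG no-ops are emitted as TYPE6 packet pairs, hence the even count */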
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring,
	uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}

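/*
 * Build a register-access sequence directly in the ring buffer at @ptr,
 * presumably in the 64 extra dwords reserved via .extra_dw: it points the
 * JRBC read BAR back at the ring, enables NO_FETCH plus RPTR write access,
 * waits for the engine to acknowledge, resets the RPTR and re-enables
 * fetching.
 */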
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg, reg_offset, val, mask, i;

	/* 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 3rd to 5th: issue MEM_READ commands */
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	/*
	 * 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and
	 * RPTR write ability
	 */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 7th: program mmUVD_JRBC_RB_REF_DATA */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 8th: issue conditional register read mmUVD_JRBC_RB_CNTL */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	/* 9th to 21st: insert no-op */
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	/* 22nd: reset mmUVD_JRBC_RB_RPTR */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x12;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
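	/* there is no per-type VCN interrupt state to program */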
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case VCN_1_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

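	/* each decode no-op is a PACKET0 write to mmUVD_NO_OP plus one data dword */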
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.extra_dw = 64,
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
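	/* one fence interrupt per ring: decode, the enc rings and jpeg */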
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};