1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #ifndef __AMDGPU_VCN_H__ |
25 | #define __AMDGPU_VCN_H__ |
26 | |
27 | #include "amdgpu_ras.h" |
28 | |
/* Per-instance sizes reserved in the VCPU BO for firmware stack/context */
#define AMDGPU_VCN_STACK_SIZE (128*1024)
#define AMDGPU_VCN_CONTEXT_SIZE (512*1024)

/* Byte offset of the firmware image inside the VCPU BO */
#define AMDGPU_VCN_FIRMWARE_OFFSET 256
#define AMDGPU_VCN_MAX_ENC_RINGS 3

#define AMDGPU_MAX_VCN_INSTANCES 4
#define AMDGPU_MAX_VCN_ENC_RINGS (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)

/* Harvest (fused-off instance) bits, one per VCN instance */
#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)

/*
 * Decode ring packet opcodes; commands issued by the kernel driver are
 * tagged with VCN_DEC_KMD_CMD in the high bit.
 */
#define VCN_DEC_KMD_CMD 0x80000000
#define VCN_DEC_CMD_FENCE 0x00000000
#define VCN_DEC_CMD_TRAP 0x00000001
#define VCN_DEC_CMD_WRITE_REG 0x00000004
#define VCN_DEC_CMD_REG_READ_COND_WAIT 0x00000006
#define VCN_DEC_CMD_PACKET_START 0x0000000a
#define VCN_DEC_CMD_PACKET_END 0x0000000b

/* Software (driver-emulated) decode ring opcodes */
#define VCN_DEC_SW_CMD_NO_OP 0x00000000
#define VCN_DEC_SW_CMD_END 0x00000001
#define VCN_DEC_SW_CMD_IB 0x00000002
#define VCN_DEC_SW_CMD_FENCE 0x00000003
#define VCN_DEC_SW_CMD_TRAP 0x00000004
#define VCN_DEC_SW_CMD_IB_AUTO 0x00000005
#define VCN_DEC_SW_CMD_SEMAPHORE 0x00000006
#define VCN_DEC_SW_CMD_PREEMPT_FENCE 0x00000009
#define VCN_DEC_SW_CMD_REG_WRITE 0x0000000b
#define VCN_DEC_SW_CMD_REG_WAIT 0x0000000c

/* Encode ring opcodes */
#define VCN_ENC_CMD_NO_OP 0x00000000
#define VCN_ENC_CMD_END 0x00000001
#define VCN_ENC_CMD_IB 0x00000002
#define VCN_ENC_CMD_FENCE 0x00000003
#define VCN_ENC_CMD_TRAP 0x00000004
#define VCN_ENC_CMD_REG_WRITE 0x0000000b
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
/*
 * VCN register aperture windows (byte addresses within the SOC space) and
 * the internal IP-relative bases they map to for DPG indirect access.
 *
 * VCN_VID_SOC_ADDRESS_2_0 and VCN1_VID_SOC_ADDRESS_3_0 are referenced by
 * SOC15_DPG_MODE_OFFSET but were missing from this header; without them
 * any user of that macro fails to compile.  Values match the upstream
 * amdgpu driver.
 */
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000

/* RBC IB sanity-check registers (dword offset + reg_offset base index) */
#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
#define mmUVD_REG_XX_MASK 0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX 1

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
80 | |
/*
 * Indirect register read while the VCN instance is in DPG (dynamic power
 * gating) mode, VCN 1.0 flavor: program the mask and the target register
 * address (plus SRAM select) through UVD_DPG_LMA_MASK/CTL, then read the
 * value back from UVD_DPG_LMA_DATA.  GCC statement expression — evaluates
 * to the register value.
 */
#define RREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, mask, sram_sel) \
	({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
		UVD_DPG_LMA_CTL__MASK_EN_MASK | \
		((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
		<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
		(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
	RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
	})
90 | |
/*
 * Indirect register write in DPG mode, VCN 1.0 flavor: stage the value in
 * UVD_DPG_LMA_DATA, then trigger the write by programming the target
 * address, mask and SRAM select into UVD_DPG_LMA_CTL with the READ_WRITE
 * bit set.  Relies on "adev" being in scope at the expansion site.
 */
#define WREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, value, mask, sram_sel) \
	do { \
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
			UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
	} while (0)
101 | |
/*
 * Translate a register's absolute SOC offset into the VCN-internal
 * (IP-relative) dword offset used for DPG indirect access.  The low 20
 * bits of the byte address are classified against the VID and AON windows
 * of instance 0 ("..._2_0") and instance 1 ("..._3_0") and rebased onto
 * the matching internal IP base; an address outside every window passes
 * through unchanged.  Statement expression — evaluates to the dword
 * offset.  Requires VCN_VID_SOC_ADDRESS_2_0 / VCN1_VID_SOC_ADDRESS_3_0
 * to be defined before use.
 */
#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
	({ \
		uint32_t internal_reg_offset, addr; \
		bool video_range, video1_range, aon_range, aon1_range; \
		\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
		addr <<= 2; \
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
		video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS_3_0)) && \
				((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS_3_0 + 0x2600))))); \
		aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) && \
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600))))); \
		aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS_3_0)) && \
				((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS_3_0 + 0x600))))); \
		if (video_range) \
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) + \
				(VCN_VID_IP_ADDRESS_2_0)); \
		else if (aon_range) \
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) + \
				(VCN_AON_IP_ADDRESS_2_0)); \
		else if (video1_range) \
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS_3_0) + \
				(VCN_VID_IP_ADDRESS_2_0)); \
		else if (aon1_range) \
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS_3_0) + \
				(VCN_AON_IP_ADDRESS_2_0)); \
		else \
			internal_reg_offset = (0xFFFFF & addr); \
		\
		internal_reg_offset >>= 2; \
	})
134 | |
/*
 * DPG-mode indirect register read, VCN 2.0+ flavor: "offset" is the
 * internal dword offset (typically from SOC15_DPG_MODE_OFFSET).  Evaluates
 * to the value read back from UVD_DPG_LMA_DATA.
 */
#define RREG32_SOC15_DPG_MODE(inst_idx, offset, mask_en) \
	({ \
		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
		RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
	})
143 | |
/*
 * DPG-mode indirect register write, VCN 2.0+ flavor.  With !indirect the
 * write is issued immediately through the LMA data/control registers of
 * the physical instance (GET_INST remaps logical to physical index).
 * With indirect, the (offset, value) pair is instead appended to the
 * instance's DPG SRAM command stream via dpg_sram_curr_addr, to be
 * programmed later (see amdgpu_vcn_psp_update_sram).
 */
#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
	do { \
		if (!indirect) { \
			WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
				     mmUVD_DPG_LMA_DATA, value); \
			WREG32_SOC15( \
				VCN, GET_INST(VCN, inst_idx), \
				mmUVD_DPG_LMA_CTL, \
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
		} else { \
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
				offset; \
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
				value; \
		} \
	} while (0)
162 | |
/* Feature bits exchanged through present_flag_0 of the fw-shared region */
#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
/* NOTE(review): same bit as SMU_VERSION_INFO above — presumably never
 * advertised together (different fw-shared layouts); confirm. */
#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_VF_RB_DECOUPLE_FLAG (1 << 15)

/* Max ring-buffer descriptors in amdgpu_fw_shared_rb_setup's rb_info[] */
#define MAX_NUM_VCN_RB_SETUP 4

#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001

/* Bits used in vcn_codec_disable_mask[] to turn off individual codecs */
#define VCN_CODEC_DISABLE_MASK_AV1 (1 << 0)
#define VCN_CODEC_DISABLE_MASK_VP9 (1 << 1)
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)

/* Values for amdgpu_fw_shared_smu_interface_info.smu_interface_type */
#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)

/* Value for amdgpu_fw_shared_drm_key_wa.method */
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
188 | |
/* Firmware queue handling modes */
enum fw_queue_mode {
	FW_QUEUE_RING_RESET = 1,
	FW_QUEUE_DPG_HOLD_OFF = 2,
};

/* Hardware status/config values polled or programmed during init/teardown */
enum engine_status_constants {
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0 = 0x2A2A8AA0,
	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
	UVD_STATUS__UVD_BUSY = 0x00000004,
	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
	UVD_STATUS__IDLE = 0x2,
	UVD_STATUS__BUSY = 0x5,
	UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
	UVD_STATUS__RBC_BUSY = 0x1,
	UVD_PGFSM_STATUS_UVDJ_PWR_ON = 0,
};

/* Driver-side DPG pause state */
enum internal_dpg_state {
	VCN_DPG_STATE__UNPAUSE = 0,
	VCN_DPG_STATE__PAUSE,
};

/* Pause state tracked separately for the fw-based path and the JPEG engine */
struct dpg_pause_state {
	enum internal_dpg_state fw_based;
	enum internal_dpg_state jpeg;
};
217 | |
/*
 * Register offsets used to drive a VCN decode ring; populated per ASIC.
 * One copy holds internal (IP-relative) offsets and another the external
 * mapped offsets — see amdgpu_vcn.internal and amdgpu_vcn_inst.external.
 * (Fix: added the kernel-style space before the opening brace.)
 */
struct amdgpu_vcn_reg {
	unsigned data0;		/* GPCOM data register 0 */
	unsigned data1;		/* GPCOM data register 1 */
	unsigned cmd;		/* GPCOM command register */
	unsigned nop;		/* no-op/padding register */
	unsigned context_id;
	unsigned ib_vmid;	/* VMID for indirect buffer execution */
	unsigned ib_bar_low;	/* IB GPU address, low 32 bits */
	unsigned ib_bar_high;	/* IB GPU address, high 32 bits */
	unsigned ib_size;	/* IB size register */
	unsigned gp_scratch8;
	unsigned scratch9;	/* scratch register used for ring tests */
};
231 | |
/* CPU/GPU mapping of the memory region shared with VCN firmware */
struct amdgpu_vcn_fw_shared {
	void *cpu_addr;		/* CPU virtual address of the region */
	uint64_t gpu_addr;	/* GPU address of the region */
	uint32_t mem_size;	/* size of the region in bytes */
	uint32_t log_offset;	/* offset of the firmware-log area within it
				 * (assumed from name — see amdgpu_vcn_fwlog) */
};
238 | |
/* Per-instance VCN state (up to AMDGPU_MAX_VCN_INSTANCES of these) */
struct amdgpu_vcn_inst {
	struct amdgpu_bo	*vcpu_bo;	/* VCPU buffer object */
	void			*cpu_addr;	/* CPU mapping of vcpu_bo */
	uint64_t		gpu_addr;	/* GPU address of vcpu_bo */
	void			*saved_bo;	/* vcpu_bo contents saved across suspend */
	struct amdgpu_ring	ring_dec;	/* decode ring */
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; /* encode rings */
	atomic_t		sched_score;
	struct amdgpu_irq_src	irq;
	struct amdgpu_irq_src	ras_poison_irq;
	struct amdgpu_vcn_reg	external;	/* externally mapped reg offsets */
	struct amdgpu_bo	*dpg_sram_bo;	/* BO backing the DPG command stream */
	struct dpg_pause_state	pause_state;
	void			*dpg_sram_cpu_addr;
	uint64_t		dpg_sram_gpu_addr;
	uint32_t		*dpg_sram_curr_addr; /* write cursor used by
						      * WREG32_SOC15_DPG_MODE(indirect) */
	atomic_t		dpg_enc_submission_cnt;
	struct amdgpu_vcn_fw_shared fw_shared;	/* fw-shared memory bookkeeping */
	uint8_t			aid_id;
};
259 | |
/* RAS (reliability/availability/serviceability) hookup for VCN */
struct amdgpu_vcn_ras {
	struct amdgpu_ras_block_object ras_block;
};

/* Top-level VCN state embedded in struct amdgpu_device */
struct amdgpu_vcn {
	unsigned		fw_version;
	struct delayed_work	idle_work;	/* delayed power-down after idle */
	const struct firmware	*fw;	/* VCN firmware */
	unsigned		num_enc_rings;	/* encode rings per instance */
	enum amd_powergating_state cur_state;
	bool			indirect_sram;	/* use DPG SRAM indirect programming */

	uint8_t	num_vcn_inst;			/* populated instances in inst[] */
	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
	uint8_t			 vcn_config[AMDGPU_MAX_VCN_INSTANCES];
	uint32_t		 vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
	struct amdgpu_vcn_reg	 internal;	/* IP-internal reg offsets */
	struct mutex		 vcn_pg_lock;	/* serializes power-gating transitions */
	struct mutex		vcn1_jpeg1_workaround;
	atomic_t		 total_submission_cnt;

	unsigned	harvest_config;		/* AMDGPU_VCN_HARVEST_* bits */
	int (*pause_dpg_mode)(struct amdgpu_device *adev,
			      int inst_idx, struct dpg_pause_state *new_state);

	struct ras_common_if    *ras_if;
	struct amdgpu_vcn_ras   *ras;

	uint16_t inst_mask;			/* bitmask of present instances */
	uint8_t	 num_inst_per_aid;
};
291 | |
/* Layouts below are shared with firmware — do not reorder/resize fields. */

struct amdgpu_fw_shared_rb_ptrs_struct {
	/* to WA DPG R/W ptr issues.*/
	uint32_t  rptr;
	uint32_t  wptr;
};

/* Per-queue enable/mode bytes (values from enum fw_queue_mode) */
struct amdgpu_fw_shared_multi_queue {
	uint8_t decode_queue_mode;
	uint8_t encode_generalpurpose_queue_mode;
	uint8_t encode_lowlatency_queue_mode;
	uint8_t encode_realtime_queue_mode;
	uint8_t padding[4];
};

/* Software decode-ring enablement */
struct amdgpu_fw_shared_sw_ring {
	uint8_t is_enabled;
	uint8_t padding[3];
};

/* Unified (single submission) queue state */
struct amdgpu_fw_shared_unified_queue_struct {
	uint8_t is_enabled;
	uint8_t queue_mode;
	uint8_t queue_status;
	uint8_t padding[5];
};

/* Location/size of the firmware log buffer handed to the fw */
struct amdgpu_fw_shared_fw_logging {
	uint8_t is_enabled;
	uint32_t addr_lo;	/* log buffer GPU address, low 32 bits */
	uint32_t addr_hi;	/* log buffer GPU address, high 32 bits */
	uint32_t size;
};

/* SMU DPM interface type: AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU or _APU */
struct amdgpu_fw_shared_smu_interface_info {
	uint8_t smu_interface_type;
	uint8_t padding[3];
};
329 | |
/*
 * Legacy (pre-VCN4) fw-shared memory layout.  present_flag_0 carries the
 * AMDGPU_VCN_*_FLAG bits; the pad arrays keep each sub-struct at the
 * offset the firmware expects — do not resize.
 */
struct amdgpu_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[44];
	struct amdgpu_fw_shared_rb_ptrs_struct rb;
	uint8_t pad1[1];
	struct amdgpu_fw_shared_multi_queue multi_queue;
	struct amdgpu_fw_shared_sw_ring sw_ring;
	struct amdgpu_fw_shared_fw_logging fw_log;
	struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
340 | |
/* One ring-buffer descriptor passed to the fw (address split lo/hi) */
struct amdgpu_vcn_rb_setup_info {
	uint32_t  rb_addr_lo;
	uint32_t  rb_addr_hi;
	uint32_t  rb_size;
};

/*
 * Ring-buffer setup block (SR-IOV / VF path).  The two union views alias
 * the same bytes: the named rb/rb4 fields for the legacy two-ring layout,
 * or rb_info[] for up to MAX_NUM_VCN_RB_SETUP rings.
 */
struct amdgpu_fw_shared_rb_setup {
	uint32_t  is_rb_enabled_flags;

	union {
		struct {
			uint32_t  rb_addr_lo;
			uint32_t  rb_addr_hi;
			uint32_t  rb_size;
			uint32_t  rb4_addr_lo;
			uint32_t  rb4_addr_hi;
			uint32_t  rb4_size;
			uint32_t  reserved[6];
		};

		struct {
			struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP];
		};
	};
};

/* DRM key injection workaround selector (see AMDGPU_DRM_KEY_INJECT_...) */
struct amdgpu_fw_shared_drm_key_wa {
	uint8_t  method;
	uint8_t  reserved[3];
};

/* Ring-buffer decoupling enablement (VF) */
struct amdgpu_fw_shared_queue_decouple {
	uint8_t  is_enabled;
	uint8_t  reserved[7];
};
376 | |
/*
 * VCN 4.x fw-shared memory layout.  As with amdgpu_fw_shared, the pad
 * arrays fix the firmware-defined offsets of each sub-struct — do not
 * resize them.
 */
struct amdgpu_vcn4_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[12];
	struct amdgpu_fw_shared_unified_queue_struct sq;
	uint8_t pad1[8];
	struct amdgpu_fw_shared_fw_logging fw_log;
	uint8_t pad2[20];
	struct amdgpu_fw_shared_rb_setup rb_setup;
	struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
	struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
	uint8_t pad3[9];
	struct amdgpu_fw_shared_queue_decouple decouple;
};
390 | |
/*
 * Control header of the VCN firmware log ring buffer (see
 * amdgpu_vcn_fwlog_init / amdgpu_debugfs_vcn_fwlog_init).
 *
 * Fix: the fourth field was declared as "uint32_t ;" with no identifier —
 * a compile error.  Restored as header_size per the upstream layout.
 */
struct amdgpu_vcn_fwlog {
	uint32_t rptr;		/* read pointer into the log buffer */
	uint32_t wptr;		/* write pointer into the log buffer */
	uint32_t buffer_size;	/* size of the log data area */
	uint32_t header_size;	/* size of this header */
	uint8_t  wrapped;	/* nonzero once the write pointer has wrapped */
};
398 | |
/*
 * Decode-buffer descriptor embedded in submissions; valid_buf_flag carries
 * AMDGPU_VCN_CMD_FLAG_MSG_BUFFER when the msg buffer address is valid.
 */
struct amdgpu_vcn_decode_buffer {
	uint32_t valid_buf_flag;
	uint32_t msg_buffer_address_hi;	/* message buffer GPU address, high */
	uint32_t msg_buffer_address_lo;	/* message buffer GPU address, low */
	uint32_t pad[30];
};

/* Metadata block describing a ring buffer (VF decoupled-ring path);
 * layout is fw-facing — do not reorder. */
struct amdgpu_vcn_rb_metadata {
	uint32_t size;
	uint32_t present_flag_0;

	uint8_t version;
	uint8_t ring_id;
	uint8_t pad[26];
};
414 | |
/* Bits in vcn_config[] marking disabled queue types for an instance */
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0

/* Ring flavor, used when checking whether an instance supports a ring */
enum vcn_ring_type {
	VCN_ENCODE_RING,
	VCN_DECODE_RING,
	VCN_UNIFIED_RING,
};
424 | |
/* Lifecycle: init/teardown and suspend/resume of all VCN instances */
int amdgpu_vcn_early_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
int amdgpu_vcn_resume(struct amdgpu_device *adev);
/* Ring usage tracking (drives idle_work power management) */
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);

/* True if the given ring type is disabled on vcn_instance (vcn_config) */
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev,
				enum vcn_ring_type type, uint32_t vcn_instance);

/* Ring and IB smoke tests, one pair per ring flavor */
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

/* Scheduler priority for the ring-th encode ring */
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);

/* Firmware-log (amdgpu_vcn_fwlog) setup and its debugfs interface */
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
				   uint8_t i, struct amdgpu_vcn_inst *vcn);

/* RAS poison handling and RAS block registration */
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry);
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
			struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);

/* Upload the accumulated DPG SRAM command stream via PSP */
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id);
462 | |
463 | #endif |
464 | |