/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

#include "amdgpu_ras.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_SI
#define FIRMWARE_TAHITI		"amdgpu/tahiti_uvd.bin"
#define FIRMWARE_VERDE		"amdgpu/verde_uvd.bin"
#define FIRMWARE_PITCAIRN	"amdgpu/pitcairn_uvd.bin"
#define FIRMWARE_OLAND		"amdgpu/oland_uvd.bin"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all ASICs, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800
/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned int reg, count;
	unsigned int data0, data1;
	unsigned int idx;
	struct amdgpu_ib *ib;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned int *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_SI
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_VERDE);
MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
MODULE_FIRMWARE(FIRMWARE_OLAND);
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);

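/**
 * amdgpu_uvd_create_msg_bo_helper - allocate a BO for UVD messages
 *
 * @adev: amdgpu_device pointer
 * @size: BO size in bytes
 * @bo_ptr: resulting message BO
 *
 * Allocate a kernel BO in GTT; on chips without 64-bit UVD addressing,
 * migrate it into the 256MB segment of VRAM that UVD can reach instead.
 */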
static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
					   uint32_t size,
					   struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo *bo = NULL;
	void *addr;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo, NULL, &addr);
	if (r)
		return r;

	if (adev->uvd.address_64_bit)
		goto succ;

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_uvd_force_into_uvd_segment(bo);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto err;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto err_pin;
	r = amdgpu_bo_kmap(bo, &addr);
	if (r)
		goto err_kmap;
succ:
	amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;
	return 0;
err_kmap:
	amdgpu_bo_unpin(bo);
err_pin:
err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

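/**
 * amdgpu_uvd_sw_init - UVD software init
 *
 * @adev: amdgpu_device pointer
 *
 * Request the firmware, check its version, allocate the VCPU BO for
 * each UVD instance and initialize the handle table.
 */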
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned int family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
		fw_name = FIRMWARE_TAHITI;
		break;
	case CHIP_VERDE:
		fw_name = FIRMWARE_VERDE;
		break;
	case CHIP_PITCAIRN:
		fw_name = FIRMWARE_PITCAIRN;
		break;
	case CHIP_OLAND:
		fw_name = FIRMWARE_OLAND;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->uvd.fw);
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned int version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on microcode major
		 * and minor versions. The first firmware version to support 40
		 * UVD handles is 1.80 (version_minor 0x50), so all subsequent
		 * versions should also support them.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
			 enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr,
					    &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* From UVD v5.0 the HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

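/**
 * amdgpu_uvd_sw_fini - UVD software fini
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy the scheduler entity, free the VCPU BOs and rings, and
 * release the firmware.
 */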
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
	int i, j;

	drm_sched_entity_destroy(&adev->uvd.entity);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		kvfree(adev->uvd.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
	amdgpu_ucode_release(&adev->uvd.fw);

	return 0;
}

/**
 * amdgpu_uvd_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the scheduler entity used to submit kernel UVD jobs.
 */
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->uvd.inst[0].ring;
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r) {
		DRM_ERROR("Failed setting up UVD kernel entity.\n");
		return r;
	}

	return 0;
}

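/**
 * amdgpu_uvd_prepare_suspend - prepare UVD for suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Save the contents of each VCPU BO to system memory so they can be
 * restored on resume.
 */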
int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i, j, idx;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	/* only valid for physical mode */
	if (adev->asic_type < CHIP_POLARIS10) {
		for (i = 0; i < adev->uvd.max_handles; ++i)
			if (atomic_read(&adev->uvd.handles[i]))
				break;

		if (i == adev->uvd.max_handles)
			return 0;
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			/* re-write 0 since err_event_athub will corrupt VCPU buffer */
			if (amdgpu_ras_intr_triggered())
				memset(adev->uvd.inst[j].saved_bo, 0, size);
			else
				memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);

			drm_dev_exit(idx);
		}
	}

	return 0;
}

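/**
 * amdgpu_uvd_suspend - UVD suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Warn if the VCPU state could not be preserved because a RAS
 * interrupt was in flight.
 */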
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	if (amdgpu_ras_intr_triggered())
		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");

	return 0;
}

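/**
 * amdgpu_uvd_resume - UVD resume
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the saved VCPU BO contents, or reload the firmware image
 * and clear the rest of the buffer if nothing was saved.
 */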
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned int offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore UVD fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

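/**
 * amdgpu_uvd_free_handles - free UVD handles on process exit
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send a destroy message for every handle still owned by @filp and
 * release the handle slots.
 */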
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);

		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
						       &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD %d!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

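/**
 * amdgpu_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @abo: the buffer object to restrict
 *
 * Limit all placements to the first 256MB, the segment UVD can
 * address on chips without 64-bit support.
 */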
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

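/**
 * amdgpu_uvd_get_addr_from_ctx - assemble a 64-bit buffer address
 *
 * @ctx: UVD parser context
 *
 * Combine the values written to the DATA0/DATA1 registers into a
 * single 64-bit address.
 */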
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_ib_get_value(ctx->ib, ctx->data0);
	hi = amdgpu_ib_get_value(ctx->ib, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_bo_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}


/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: placeholder to put the different buffer lengths
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned int buf_sizes[])
{
	unsigned int stream_type = msg[4];
	unsigned int width = msg[6];
	unsigned int height = msg[7];
	unsigned int dpb_size = msg[9];
	unsigned int pitch = msg[28];
	unsigned int level = msg[57];

	unsigned int width_in_mb = width / 16;
	unsigned int height_in_mb = ALIGN(height / 16, 2);
	unsigned int fs_in_mb = width_in_mb * height_in_mb;

	unsigned int image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned int min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	/* store image width to adjust nb memory pstate */
	adev->uvd.decode_image_width = width;
	return 0;
}


/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned int offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n",
					  handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
	}

	amdgpu_bo_kunmap(bo);
	return -EINVAL;
}


/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_ib_set_value(ctx->ib, ctx->data0, lower_32_bits(start));
	amdgpu_ib_set_value(ctx->ib, ctx->data1, upper_32_bits(start));

	cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned int)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned int)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %llx-%llx crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %llx-%llx out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}


/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned int reg = ctx->reg + i;

		if (ctx->idx >= ctx->ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}


/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	int r;

	for (ctx->idx = 0; ctx->idx < ctx->ib->length_dw; ) {
		uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
		unsigned int type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}


/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @job: the job to parse
 * @ib: the IB to patch
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned int buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	int r;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib = ib;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

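/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer to the ring
 *
 * @ring: UVD ring to use
 * @bo: BO containing the message
 * @direct: submit directly to the ring or through the scheduler
 * @fence: optional resulting fence
 *
 * Build a small IB pointing the VCPU at @bo and submit it.
 */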
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	uint32_t offset, data[4];
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     64, direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_VEGA10)
		offset = adev->reg_offset[UVD_HWIP][ring->me][1];
	else
		offset = UVD_BASE_SI;

	data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_job_submit_direct(job, ring, &f);
		if (r)
			goto err_free;
	} else {
		r = drm_sched_job_add_resv_dependencies(&job->base,
							bo->tbo.base.resv,
							DMA_RESV_USAGE_KERNEL);
		if (r)
			goto err_free;

		f = amdgpu_job_submit(job);
	}

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
	return r;
}

/* multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this
 */
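/**
 * amdgpu_uvd_get_create_msg - generate a UVD create msg
 *
 * @ring: UVD ring to use
 * @handle: session handle to create
 * @fence: optional resulting fence
 */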
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = adev->uvd.ib_bo;
	uint32_t *msg;
	int i;

	msg = amdgpu_bo_kptr(bo);
	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

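/**
 * amdgpu_uvd_get_destroy_msg - generate a UVD destroy msg
 *
 * @ring: UVD ring to use
 * @handle: session handle to destroy
 * @direct: submit directly to the ring or through the scheduler
 * @fence: optional resulting fence
 */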
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	if (direct) {
		bo = adev->uvd.ib_bo;
	} else {
		r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
		if (r)
			return r;
	}

	msg = amdgpu_bo_kptr(bo);
	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = amdgpu_uvd_send_msg(ring, bo, direct, fence);

	if (!direct)
		amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);

	return r;
}

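/**
 * amdgpu_uvd_idle_work_handler - power off UVD when idle
 *
 * @work: pointer to the delayed work
 *
 * Gate UVD clocks and power once no fences are outstanding on any
 * ring, otherwise re-arm the idle timer.
 */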
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned int fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j)
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

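/**
 * amdgpu_uvd_ring_begin_use - power up UVD before use
 *
 * @ring: UVD ring about to be used
 *
 * Cancel any pending idle work and ungate UVD clocks and power.
 */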
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

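/**
 * amdgpu_uvd_ring_end_use - re-arm the idle timer
 *
 * @ring: UVD ring that was used
 */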
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	dma_fence_put(fence);
	if (r == 0)
		r = -ETIMEDOUT;
	if (r < 0)
		goto error;

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned int i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}