/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 * @xcc_id: xcc accelerated compute core id
 *
 * Put the RLC into safe mode if it is enabled and not already in safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	if (adev->gfx.rlc.in_safe_mode[xcc_id])
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

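	/* safe mode is only needed while GFX clock gating is active; it
	 * idles the RLC so the driver can safely reprogram GFX registers
	 */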
	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id);
		adev->gfx.rlc.in_safe_mode[xcc_id] = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 * @xcc_id: xcc accelerated compute core id
 *
 * Take the RLC out of safe mode if it is enabled and currently in safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	if (!(adev->gfx.rlc.in_safe_mode[xcc_id]))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id);
		adev->gfx.rlc.in_safe_mode[xcc_id] = false;
	}
}

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: size of the save restore block, in dwords
 *
 * Allocate the save restore block and fill it with the RLC register list.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC clear state block.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	u32 dws;
	int r;

	/* allocate clear state block */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->gfx.rlc.clear_state_obj,
				    &adev->gfx.rlc.clear_state_gpu_addr,
				    (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the cp table and fill it with the CP firmware jump tables.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_setup_cp_table - setup the cp table buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Write the CP firmware jump tables into the cp table buffer.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
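	/* me 0-4 select the CE, PFP, ME, MEC and MEC2 jump tables,
	 * which are packed back to back into the cp table BO
	 */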
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the save restore block, the clear state block
 * and the jump table (cp table) block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	}

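	/* amdgpu_bo_free_kernel() is a no-op for BOs that were never created */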
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
{
	const struct common_firmware_header *common_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	struct amdgpu_firmware_info *info;
	unsigned int *tmp;
	unsigned int i;

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
		return -ENOMEM;
	}

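	/* the single allocation holds the register list format entries
	 * followed by the register restore entries; register_restore
	 * points just past the format entries
	 */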
	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			common_hdr = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
		}
	}

	return 0;
}

static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;
	struct amdgpu_firmware_info *info;

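	/* v2.1 adds the save/restore list CNTL, GPM and SRM ucode images */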
	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *rlc_hdr;
	struct amdgpu_firmware_info *info;

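	/* v2.2 adds the RLC IRAM and DRAM ucode images */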
	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *rlc_hdr;
	struct amdgpu_firmware_info *info;

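	/* v2.3 adds the RLCP and RLCV ucode images */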
	rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
	adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
	adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
	adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);

	adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
	adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
	adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
	adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_4 *rlc_hdr;
	struct amdgpu_firmware_info *info;

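	/* v2.4 adds the global and per-SE (0-3) tap delay ucode images */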
	rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
				  uint16_t version_major,
				  uint16_t version_minor)
{
	int err;

	if (version_major < 2) {
		/* only support rlc_hdr v2.x and onwards */
		dev_err(adev->dev, "unsupported rlc fw hdr\n");
		return -EINVAL;
	}

	/* is_rlc_v2_1 is still used in APU code path */
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

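	/* minor versions 0-2 are cumulative, so every matching parser runs;
	 * the v2.3 and v2.4 specific payloads are parsed only on an exact
	 * minor version match
	 */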
	if (version_minor >= 0) {
		err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
		if (err) {
			dev_err(adev->dev, "failed to init rlc v2_0 microcode\n");
			return err;
		}
	}
	if (version_minor >= 1)
		amdgpu_gfx_rlc_init_microcode_v2_1(adev);
	if (version_minor >= 2)
		amdgpu_gfx_rlc_init_microcode_v2_2(adev);
	if (version_minor == 3)
		amdgpu_gfx_rlc_init_microcode_v2_3(adev);
	if (version_minor == 4)
		amdgpu_gfx_rlc_init_microcode_v2_4(adev);

	return 0;
}