1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | #include <linux/firmware.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/module.h> |
26 | #include <drm/drmP.h> |
27 | #include "amdgpu.h" |
28 | #include "amdgpu_atombios.h" |
29 | #include "amdgpu_ih.h" |
30 | #include "amdgpu_uvd.h" |
31 | #include "amdgpu_vce.h" |
32 | #include "amdgpu_ucode.h" |
33 | #include "amdgpu_psp.h" |
34 | #include "atom.h" |
35 | #include "amd_pcie.h" |
36 | |
37 | #include "uvd/uvd_7_0_offset.h" |
38 | #include "gc/gc_9_0_offset.h" |
39 | #include "gc/gc_9_0_sh_mask.h" |
40 | #include "sdma0/sdma0_4_0_offset.h" |
41 | #include "sdma1/sdma1_4_0_offset.h" |
42 | #include "hdp/hdp_4_0_offset.h" |
43 | #include "hdp/hdp_4_0_sh_mask.h" |
44 | #include "smuio/smuio_9_0_offset.h" |
45 | #include "smuio/smuio_9_0_sh_mask.h" |
46 | #include "nbio/nbio_7_0_default.h" |
47 | #include "nbio/nbio_7_0_sh_mask.h" |
48 | #include "nbio/nbio_7_0_smn.h" |
49 | #include "mp/mp_9_0_offset.h" |
50 | |
51 | #include "soc15.h" |
52 | #include "soc15_common.h" |
53 | #include "gfx_v9_0.h" |
54 | #include "gmc_v9_0.h" |
55 | #include "gfxhub_v1_0.h" |
56 | #include "mmhub_v1_0.h" |
57 | #include "df_v1_7.h" |
58 | #include "df_v3_6.h" |
59 | #include "vega10_ih.h" |
60 | #include "sdma_v4_0.h" |
61 | #include "uvd_v7_0.h" |
62 | #include "vce_v4_0.h" |
63 | #include "vcn_v1_0.h" |
64 | #include "dce_virtual.h" |
65 | #include "mxgpu_ai.h" |
66 | |
67 | #define mmMP0_MISC_CGTT_CTRL0 0x01b9 |
68 | #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 |
69 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba |
70 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 |
71 | |
72 | /* for Vega20 register name change */ |
73 | #define mmHDP_MEM_POWER_CTRL 0x00d4 |
74 | #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L |
75 | #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L |
76 | #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L |
77 | #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L |
78 | #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 |
79 | /* |
80 | * Indirect registers accessor |
81 | */ |
82 | static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) |
83 | { |
84 | unsigned long flags, address, data; |
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
87 | data = adev->nbio_funcs->get_pcie_data_offset(adev); |
88 | |
89 | spin_lock_irqsave(&adev->pcie_idx_lock, flags); |
90 | WREG32(address, reg); |
91 | (void)RREG32(address); |
92 | r = RREG32(data); |
93 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); |
94 | return r; |
95 | } |
96 | |
97 | static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
98 | { |
99 | unsigned long flags, address, data; |
100 | |
101 | address = adev->nbio_funcs->get_pcie_index_offset(adev); |
102 | data = adev->nbio_funcs->get_pcie_data_offset(adev); |
103 | |
104 | spin_lock_irqsave(&adev->pcie_idx_lock, flags); |
105 | WREG32(address, reg); |
106 | (void)RREG32(address); |
107 | WREG32(data, v); |
108 | (void)RREG32(data); |
109 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); |
110 | } |
111 | |
112 | static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) |
113 | { |
114 | unsigned long flags, address, data; |
115 | u32 r; |
116 | |
117 | address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX); |
118 | data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA); |
119 | |
120 | spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); |
121 | WREG32(address, ((reg) & 0x1ff)); |
122 | r = RREG32(data); |
123 | spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); |
124 | return r; |
125 | } |
126 | |
127 | static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
128 | { |
129 | unsigned long flags, address, data; |
130 | |
131 | address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX); |
132 | data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA); |
133 | |
134 | spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); |
135 | WREG32(address, ((reg) & 0x1ff)); |
136 | WREG32(data, (v)); |
137 | spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); |
138 | } |
139 | |
140 | static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg) |
141 | { |
142 | unsigned long flags, address, data; |
143 | u32 r; |
144 | |
145 | address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX); |
146 | data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA); |
147 | |
148 | spin_lock_irqsave(&adev->didt_idx_lock, flags); |
149 | WREG32(address, (reg)); |
150 | r = RREG32(data); |
151 | spin_unlock_irqrestore(&adev->didt_idx_lock, flags); |
152 | return r; |
153 | } |
154 | |
155 | static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
156 | { |
157 | unsigned long flags, address, data; |
158 | |
159 | address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX); |
160 | data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA); |
161 | |
162 | spin_lock_irqsave(&adev->didt_idx_lock, flags); |
163 | WREG32(address, (reg)); |
164 | WREG32(data, (v)); |
165 | spin_unlock_irqrestore(&adev->didt_idx_lock, flags); |
166 | } |
167 | |
168 | static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) |
169 | { |
170 | unsigned long flags; |
171 | u32 r; |
172 | |
173 | spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); |
174 | WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); |
175 | r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA); |
176 | spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); |
177 | return r; |
178 | } |
179 | |
180 | static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
181 | { |
182 | unsigned long flags; |
183 | |
184 | spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); |
185 | WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); |
186 | WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v)); |
187 | spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); |
188 | } |
189 | |
190 | static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg) |
191 | { |
192 | unsigned long flags; |
193 | u32 r; |
194 | |
195 | spin_lock_irqsave(&adev->se_cac_idx_lock, flags); |
196 | WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); |
197 | r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA); |
198 | spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); |
199 | return r; |
200 | } |
201 | |
202 | static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
203 | { |
204 | unsigned long flags; |
205 | |
206 | spin_lock_irqsave(&adev->se_cac_idx_lock, flags); |
207 | WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); |
208 | WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v)); |
209 | spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); |
210 | } |
211 | |
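/* Report the VRAM size, in MB, that the NBIO block exposes through its
 * CONFIG_MEMSIZE register.
 */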
212 | static u32 soc15_get_config_memsize(struct amdgpu_device *adev) |
213 | { |
214 | return adev->nbio_funcs->get_memsize(adev); |
215 | } |
216 | |
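/* Return the reference clock (XCLK) parsed from the vbios firmware info
 * table (10 kHz units, as stored by atombios).
 */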
217 | static u32 soc15_get_xclk(struct amdgpu_device *adev) |
218 | { |
219 | return adev->clock.spll.reference_freq; |
220 | } |
222 | |
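/* Select which ME/pipe/queue/VMID subsequent GFX register accesses are
 * steered to. Callers are expected to serialize against each other,
 * typically under adev->srbm_mutex.
 */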
223 | void soc15_grbm_select(struct amdgpu_device *adev, |
224 | u32 me, u32 pipe, u32 queue, u32 vmid) |
225 | { |
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
228 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me); |
229 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid); |
230 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue); |
231 | |
232 | WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl); |
233 | } |
234 | |
235 | static void soc15_vga_set_state(struct amdgpu_device *adev, bool state) |
236 | { |
237 | /* todo */ |
238 | } |
239 | |
240 | static bool soc15_read_disabled_bios(struct amdgpu_device *adev) |
241 | { |
242 | /* todo */ |
243 | return false; |
244 | } |
245 | |
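/* Read the vbios image out of the dGPU ROM through the SMUIO
 * ROM_INDEX/ROM_DATA pair: set the index to 0 once, then each ROM_DATA
 * read returns the next dword and advances the index.
 */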
246 | static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, |
247 | u8 *bios, u32 length_bytes) |
248 | { |
249 | u32 *dw_ptr; |
250 | u32 i, length_dw; |
251 | |
252 | if (bios == NULL) |
253 | return false; |
254 | if (length_bytes == 0) |
255 | return false; |
256 | /* APU vbios image is part of sbios image */ |
257 | if (adev->flags & AMD_IS_APU) |
258 | return false; |
259 | |
260 | dw_ptr = (u32 *)bios; |
261 | length_dw = ALIGN(length_bytes, 4) / 4; |
262 | |
263 | /* set rom index to 0 */ |
264 | WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); |
265 | /* read out the rom data */ |
266 | for (i = 0; i < length_dw; i++) |
267 | dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); |
268 | |
269 | return true; |
270 | } |
271 | |
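/* Allowlist of registers that soc15_read_register() may hand back to
 * userspace (e.g. for the AMDGPU_INFO_READ_MMR_REG query).
 */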
272 | struct soc15_allowed_register_entry { |
273 | uint32_t hwip; |
274 | uint32_t inst; |
275 | uint32_t seg; |
276 | uint32_t reg_offset; |
277 | bool grbm_indexed; |
278 | }; |
279 | |
280 | |
281 | static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { |
282 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)}, |
283 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)}, |
284 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)}, |
285 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)}, |
286 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)}, |
287 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)}, |
288 | { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)}, |
289 | { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)}, |
290 | { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)}, |
291 | { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)}, |
292 | { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)}, |
293 | { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)}, |
294 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, |
295 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, |
296 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, |
297 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, |
298 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, |
299 | { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, |
300 | { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)}, |
301 | }; |
302 | |
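/* Read a GRBM-banked register: steer GRBM to the requested SE/SH first
 * (0xffffffff selects broadcast), then restore broadcast mode afterwards.
 */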
303 | static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num, |
304 | u32 sh_num, u32 reg_offset) |
305 | { |
306 | uint32_t val; |
307 | |
308 | mutex_lock(&adev->grbm_idx_mutex); |
309 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
310 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); |
311 | |
312 | val = RREG32(reg_offset); |
313 | |
314 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
315 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); |
316 | mutex_unlock(&adev->grbm_idx_mutex); |
317 | return val; |
318 | } |
319 | |
320 | static uint32_t soc15_get_register_value(struct amdgpu_device *adev, |
321 | bool indexed, u32 se_num, |
322 | u32 sh_num, u32 reg_offset) |
323 | { |
324 | if (indexed) { |
325 | return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset); |
326 | } else { |
327 | if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)) |
328 | return adev->gfx.config.gb_addr_config; |
329 | else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)) |
330 | return adev->gfx.config.db_debug2; |
331 | return RREG32(reg_offset); |
332 | } |
333 | } |
334 | |
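/* Look up the requested offset in the allowlist above and return its
 * value; anything not on the list is rejected with -EINVAL.
 */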
335 | static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, |
336 | u32 sh_num, u32 reg_offset, u32 *value) |
337 | { |
338 | uint32_t i; |
339 | struct soc15_allowed_register_entry *en; |
340 | |
341 | *value = 0; |
342 | for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { |
343 | en = &soc15_allowed_read_registers[i]; |
344 | if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] |
345 | + en->reg_offset)) |
346 | continue; |
347 | |
		*value = soc15_get_register_value(adev, en->grbm_indexed,
						  se_num, sh_num, reg_offset);
351 | return 0; |
352 | } |
353 | return -EINVAL; |
354 | } |
355 | |
356 | |
357 | /** |
358 | * soc15_program_register_sequence - program an array of registers. |
359 | * |
360 | * @adev: amdgpu_device pointer |
361 | * @regs: pointer to the register array |
362 | * @array_size: size of the register array |
363 | * |
 * Programs an array of registers with the corresponding AND and OR masks.
365 | * This is a helper for setting golden registers. |
366 | */ |
367 | |
368 | void soc15_program_register_sequence(struct amdgpu_device *adev, |
369 | const struct soc15_reg_golden *regs, |
370 | const u32 array_size) |
371 | { |
372 | const struct soc15_reg_golden *entry; |
373 | u32 tmp, reg; |
374 | int i; |
375 | |
376 | for (i = 0; i < array_size; ++i) { |
377 | entry = ®s[i]; |
378 | reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; |
379 | |
380 | if (entry->and_mask == 0xffffffff) { |
381 | tmp = entry->or_mask; |
382 | } else { |
383 | tmp = RREG32(reg); |
384 | tmp &= ~(entry->and_mask); |
385 | tmp |= entry->or_mask; |
386 | } |
387 | WREG32(reg, tmp); |
388 | } |
389 | |
390 | } |
391 | |
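/* Mode1 reset: a full-chip reset performed through the PSP. Bus mastering
 * is disabled and PCI config space is saved/restored around the reset;
 * memory becomes inaccessible, so poll CONFIG_MEMSIZE until it reads back
 * something other than 0xffffffff to know the ASIC is out of reset.
 */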
392 | static int soc15_asic_mode1_reset(struct amdgpu_device *adev) |
393 | { |
394 | u32 i; |
395 | |
396 | amdgpu_atombios_scratch_regs_engine_hung(adev, true); |
397 | |
	dev_info(adev->dev, "GPU mode1 reset\n");
399 | |
400 | /* disable BM */ |
401 | pci_clear_master(adev->pdev); |
402 | |
403 | pci_save_state(adev->pdev); |
404 | |
405 | psp_gpu_reset(adev); |
406 | |
407 | pci_restore_state(adev->pdev); |
408 | |
409 | /* wait for asic to come out of reset */ |
410 | for (i = 0; i < adev->usec_timeout; i++) { |
411 | u32 memsize = adev->nbio_funcs->get_memsize(adev); |
412 | |
413 | if (memsize != 0xffffffff) |
414 | break; |
415 | udelay(1); |
416 | } |
417 | |
418 | amdgpu_atombios_scratch_regs_engine_hung(adev, false); |
419 | |
420 | return 0; |
421 | } |
422 | |
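/* Ask the powerplay layer whether this board can do a BACO (Bus Active,
 * Chip Off) reset.
 */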
423 | static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) |
424 | { |
425 | void *pp_handle = adev->powerplay.pp_handle; |
426 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
427 | |
428 | if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { |
429 | *cap = false; |
430 | return -ENOENT; |
431 | } |
432 | |
433 | return pp_funcs->get_asic_baco_capability(pp_handle, cap); |
434 | } |
435 | |
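/* Reset the ASIC by cycling it through the BACO state: entering BACO
 * powers the chip down while the bus stays active, exiting brings it
 * back up.
 */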
436 | static int soc15_asic_baco_reset(struct amdgpu_device *adev) |
437 | { |
438 | void *pp_handle = adev->powerplay.pp_handle; |
439 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
440 | |
	if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
	    !pp_funcs->set_asic_baco_state)
		return -ENOENT;
443 | |
444 | /* enter BACO state */ |
445 | if (pp_funcs->set_asic_baco_state(pp_handle, 1)) |
446 | return -EIO; |
447 | |
448 | /* exit BACO state */ |
449 | if (pp_funcs->set_asic_baco_state(pp_handle, 0)) |
450 | return -EIO; |
451 | |
	dev_info(adev->dev, "GPU BACO reset\n");
453 | |
454 | return 0; |
455 | } |
456 | |
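/* Pick a reset method: BACO where the ASIC (currently only Vega10) and
 * the firmware support it, mode1 reset through the PSP otherwise.
 */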
457 | static int soc15_asic_reset(struct amdgpu_device *adev) |
458 | { |
459 | int ret; |
460 | bool baco_reset; |
461 | |
462 | switch (adev->asic_type) { |
463 | case CHIP_VEGA10: |
464 | soc15_asic_get_baco_capability(adev, &baco_reset); |
465 | break; |
466 | default: |
467 | baco_reset = false; |
468 | break; |
469 | } |
470 | |
471 | if (baco_reset) |
472 | ret = soc15_asic_baco_reset(adev); |
473 | else |
474 | ret = soc15_asic_mode1_reset(adev); |
475 | |
476 | return ret; |
477 | } |
478 | |
479 | /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, |
480 | u32 cntl_reg, u32 status_reg) |
481 | { |
482 | return 0; |
483 | }*/ |
484 | |
485 | static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) |
486 | { |
487 | /*int r; |
488 | |
489 | r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); |
490 | if (r) |
491 | return r; |
492 | |
493 | r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); |
494 | */ |
495 | return 0; |
496 | } |
497 | |
498 | static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) |
499 | { |
500 | /* todo */ |
501 | |
502 | return 0; |
503 | } |
504 | |
505 | static void soc15_pcie_gen3_enable(struct amdgpu_device *adev) |
506 | { |
507 | if (pci_is_root_bus(adev->pdev->bus)) |
508 | return; |
509 | |
510 | if (amdgpu_pcie_gen2 == 0) |
511 | return; |
512 | |
513 | if (adev->flags & AMD_IS_APU) |
514 | return; |
515 | |
516 | if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | |
517 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) |
518 | return; |
519 | |
520 | /* todo */ |
521 | } |
522 | |
523 | static void soc15_program_aspm(struct amdgpu_device *adev) |
524 | { |
525 | |
526 | if (amdgpu_aspm == 0) |
527 | return; |
528 | |
529 | /* todo */ |
530 | } |
531 | |
532 | static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, |
533 | bool enable) |
534 | { |
535 | adev->nbio_funcs->enable_doorbell_aperture(adev, enable); |
536 | adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); |
537 | } |
538 | |
539 | static const struct amdgpu_ip_block_version vega10_common_ip_block = |
540 | { |
541 | .type = AMD_IP_BLOCK_TYPE_COMMON, |
542 | .major = 2, |
543 | .minor = 0, |
544 | .rev = 0, |
545 | .funcs = &soc15_common_ip_funcs, |
546 | }; |
547 | |
548 | static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) |
549 | { |
550 | return adev->nbio_funcs->get_rev_id(adev); |
551 | } |
552 | |
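/* Register the per-ASIC IP blocks (common, GMC, IH, PSP, GFX, SDMA,
 * powerplay, display, multimedia) in init order. The register bases must
 * be set up before any block touches hardware.
 */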
553 | int soc15_set_ip_blocks(struct amdgpu_device *adev) |
554 | { |
555 | /* Set IP register base before any HW register access */ |
556 | switch (adev->asic_type) { |
557 | case CHIP_VEGA10: |
558 | case CHIP_VEGA12: |
559 | case CHIP_RAVEN: |
560 | vega10_reg_base_init(adev); |
561 | break; |
562 | case CHIP_VEGA20: |
563 | vega20_reg_base_init(adev); |
564 | break; |
565 | default: |
566 | return -EINVAL; |
567 | } |
568 | |
569 | if (adev->asic_type == CHIP_VEGA20) |
570 | adev->gmc.xgmi.supported = true; |
571 | |
572 | if (adev->flags & AMD_IS_APU) |
573 | adev->nbio_funcs = &nbio_v7_0_funcs; |
574 | else if (adev->asic_type == CHIP_VEGA20) |
575 | adev->nbio_funcs = &nbio_v7_4_funcs; |
576 | else |
577 | adev->nbio_funcs = &nbio_v6_1_funcs; |
578 | |
579 | if (adev->asic_type == CHIP_VEGA20) |
580 | adev->df_funcs = &df_v3_6_funcs; |
581 | else |
582 | adev->df_funcs = &df_v1_7_funcs; |
583 | |
584 | adev->rev_id = soc15_get_rev_id(adev); |
585 | adev->nbio_funcs->detect_hw_virt(adev); |
586 | |
587 | if (amdgpu_sriov_vf(adev)) |
588 | adev->virt.ops = &xgpu_ai_virt_ops; |
589 | |
590 | switch (adev->asic_type) { |
591 | case CHIP_VEGA10: |
592 | case CHIP_VEGA12: |
593 | case CHIP_VEGA20: |
594 | amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); |
595 | amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); |
596 | amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
597 | if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { |
598 | if (adev->asic_type == CHIP_VEGA20) |
599 | amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); |
600 | else |
601 | amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); |
602 | } |
603 | amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
604 | amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
605 | if (!amdgpu_sriov_vf(adev)) |
606 | amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); |
607 | if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) |
608 | amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); |
609 | #if defined(CONFIG_DRM_AMD_DC) |
610 | else if (amdgpu_device_has_dc_support(adev)) |
611 | amdgpu_device_ip_block_add(adev, &dm_ip_block); |
612 | #else |
613 | # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." |
614 | #endif |
615 | if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { |
616 | amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); |
617 | amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); |
618 | } |
619 | break; |
620 | case CHIP_RAVEN: |
621 | amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); |
622 | amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); |
623 | amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
624 | if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) |
625 | amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); |
626 | amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
627 | amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
628 | amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); |
629 | if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) |
630 | amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); |
631 | #if defined(CONFIG_DRM_AMD_DC) |
632 | else if (amdgpu_device_has_dc_support(adev)) |
633 | amdgpu_device_ip_block_add(adev, &dm_ip_block); |
634 | #else |
635 | # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." |
636 | #endif |
637 | amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); |
638 | break; |
639 | default: |
640 | return -EINVAL; |
641 | } |
642 | |
643 | return 0; |
644 | } |
645 | |
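/* Flush the HDP write cache so CPU writes to VRAM through the BAR become
 * visible to the GPU.
 */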
646 | static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) |
647 | { |
648 | adev->nbio_funcs->hdp_flush(adev, ring); |
649 | } |
650 | |
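/* Invalidate the HDP read cache. When a ring is given, emit the register
 * write on the ring so the invalidation is ordered with the GPU work;
 * otherwise hit the register directly over MMIO.
 */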
651 | static void soc15_invalidate_hdp(struct amdgpu_device *adev, |
652 | struct amdgpu_ring *ring) |
653 | { |
654 | if (!ring || !ring->funcs->emit_wreg) |
655 | WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1); |
656 | else |
657 | amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( |
658 | HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); |
659 | } |
660 | |
661 | static bool soc15_need_full_reset(struct amdgpu_device *adev) |
662 | { |
663 | /* change this when we implement soft reset */ |
664 | return true; |
665 | } |
666 | static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, |
667 | uint64_t *count1) |
668 | { |
669 | uint32_t perfctr = 0; |
670 | uint64_t cnt0_of, cnt1_of; |
671 | int tmp; |
672 | |
	/* These counters report 0 on APUs, so return early rather than
	 * touch registers that may or may not match their dGPU
	 * counterparts.
	 */
676 | if (adev->flags & AMD_IS_APU) |
677 | return; |
678 | |
679 | /* Set the 2 events that we wish to watch, defined above */ |
680 | /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */ |
681 | perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); |
682 | perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); |
683 | |
684 | /* Write to enable desired perf counters */ |
685 | WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr); |
686 | /* Zero out and enable the perf counters |
687 | * Write 0x5: |
688 | * Bit 0 = Start all counters(1) |
689 | * Bit 2 = Global counter reset enable(1) |
690 | */ |
691 | WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); |
692 | |
693 | msleep(1000); |
694 | |
695 | /* Load the shadow and disable the perf counters |
696 | * Write 0x2: |
697 | * Bit 0 = Stop counters(0) |
698 | * Bit 1 = Load the shadow counters(1) |
699 | */ |
700 | WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); |
701 | |
702 | /* Read register values to get any >32bit overflow */ |
703 | tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK); |
704 | cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); |
705 | cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); |
706 | |
707 | /* Get the values and add the overflow */ |
708 | *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); |
709 | *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); |
710 | } |
711 | |
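/* If the sOS sign-of-life register is set, the sys driver and sOS are
 * already running (e.g. after a driver reload), so the ASIC must be
 * reset before it can be reinitialized.
 */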
712 | static bool soc15_need_reset_on_init(struct amdgpu_device *adev) |
713 | { |
714 | u32 sol_reg; |
715 | |
716 | if (adev->flags & AMD_IS_APU) |
717 | return false; |
718 | |
	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
722 | sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); |
723 | if (sol_reg) |
724 | return true; |
725 | |
726 | return false; |
727 | } |
728 | |
729 | static const struct amdgpu_asic_funcs soc15_asic_funcs = |
730 | { |
731 | .read_disabled_bios = &soc15_read_disabled_bios, |
732 | .read_bios_from_rom = &soc15_read_bios_from_rom, |
733 | .read_register = &soc15_read_register, |
734 | .reset = &soc15_asic_reset, |
735 | .set_vga_state = &soc15_vga_set_state, |
736 | .get_xclk = &soc15_get_xclk, |
737 | .set_uvd_clocks = &soc15_set_uvd_clocks, |
738 | .set_vce_clocks = &soc15_set_vce_clocks, |
739 | .get_config_memsize = &soc15_get_config_memsize, |
740 | .flush_hdp = &soc15_flush_hdp, |
741 | .invalidate_hdp = &soc15_invalidate_hdp, |
742 | .need_full_reset = &soc15_need_full_reset, |
743 | .init_doorbell_index = &vega10_doorbell_index_init, |
744 | .get_pcie_usage = &soc15_get_pcie_usage, |
745 | .need_reset_on_init = &soc15_need_reset_on_init, |
746 | }; |
747 | |
748 | static const struct amdgpu_asic_funcs vega20_asic_funcs = |
749 | { |
750 | .read_disabled_bios = &soc15_read_disabled_bios, |
751 | .read_bios_from_rom = &soc15_read_bios_from_rom, |
752 | .read_register = &soc15_read_register, |
753 | .reset = &soc15_asic_reset, |
754 | .set_vga_state = &soc15_vga_set_state, |
755 | .get_xclk = &soc15_get_xclk, |
756 | .set_uvd_clocks = &soc15_set_uvd_clocks, |
757 | .set_vce_clocks = &soc15_set_vce_clocks, |
758 | .get_config_memsize = &soc15_get_config_memsize, |
759 | .flush_hdp = &soc15_flush_hdp, |
760 | .invalidate_hdp = &soc15_invalidate_hdp, |
761 | .need_full_reset = &soc15_need_full_reset, |
762 | .init_doorbell_index = &vega20_doorbell_index_init, |
763 | .get_pcie_usage = &soc15_get_pcie_usage, |
764 | .need_reset_on_init = &soc15_need_reset_on_init, |
765 | }; |
766 | |
767 | static int soc15_common_early_init(void *handle) |
768 | { |
769 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
770 | |
771 | adev->smc_rreg = NULL; |
772 | adev->smc_wreg = NULL; |
773 | adev->pcie_rreg = &soc15_pcie_rreg; |
774 | adev->pcie_wreg = &soc15_pcie_wreg; |
775 | adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; |
776 | adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; |
777 | adev->didt_rreg = &soc15_didt_rreg; |
778 | adev->didt_wreg = &soc15_didt_wreg; |
779 | adev->gc_cac_rreg = &soc15_gc_cac_rreg; |
780 | adev->gc_cac_wreg = &soc15_gc_cac_wreg; |
781 | adev->se_cac_rreg = &soc15_se_cac_rreg; |
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
786 | switch (adev->asic_type) { |
787 | case CHIP_VEGA10: |
788 | adev->asic_funcs = &soc15_asic_funcs; |
789 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
790 | AMD_CG_SUPPORT_GFX_MGLS | |
791 | AMD_CG_SUPPORT_GFX_RLC_LS | |
792 | AMD_CG_SUPPORT_GFX_CP_LS | |
793 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
794 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
795 | AMD_CG_SUPPORT_GFX_CGCG | |
796 | AMD_CG_SUPPORT_GFX_CGLS | |
797 | AMD_CG_SUPPORT_BIF_MGCG | |
798 | AMD_CG_SUPPORT_BIF_LS | |
799 | AMD_CG_SUPPORT_HDP_LS | |
800 | AMD_CG_SUPPORT_DRM_MGCG | |
801 | AMD_CG_SUPPORT_DRM_LS | |
802 | AMD_CG_SUPPORT_ROM_MGCG | |
803 | AMD_CG_SUPPORT_DF_MGCG | |
804 | AMD_CG_SUPPORT_SDMA_MGCG | |
805 | AMD_CG_SUPPORT_SDMA_LS | |
806 | AMD_CG_SUPPORT_MC_MGCG | |
807 | AMD_CG_SUPPORT_MC_LS; |
808 | adev->pg_flags = 0; |
809 | adev->external_rev_id = 0x1; |
810 | break; |
811 | case CHIP_VEGA12: |
812 | adev->asic_funcs = &soc15_asic_funcs; |
813 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
814 | AMD_CG_SUPPORT_GFX_MGLS | |
815 | AMD_CG_SUPPORT_GFX_CGCG | |
816 | AMD_CG_SUPPORT_GFX_CGLS | |
817 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
818 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
819 | AMD_CG_SUPPORT_GFX_CP_LS | |
820 | AMD_CG_SUPPORT_MC_LS | |
821 | AMD_CG_SUPPORT_MC_MGCG | |
822 | AMD_CG_SUPPORT_SDMA_MGCG | |
823 | AMD_CG_SUPPORT_SDMA_LS | |
824 | AMD_CG_SUPPORT_BIF_MGCG | |
825 | AMD_CG_SUPPORT_BIF_LS | |
826 | AMD_CG_SUPPORT_HDP_MGCG | |
827 | AMD_CG_SUPPORT_HDP_LS | |
828 | AMD_CG_SUPPORT_ROM_MGCG | |
829 | AMD_CG_SUPPORT_VCE_MGCG | |
830 | AMD_CG_SUPPORT_UVD_MGCG; |
831 | adev->pg_flags = 0; |
832 | adev->external_rev_id = adev->rev_id + 0x14; |
833 | break; |
834 | case CHIP_VEGA20: |
835 | adev->asic_funcs = &vega20_asic_funcs; |
836 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
837 | AMD_CG_SUPPORT_GFX_MGLS | |
838 | AMD_CG_SUPPORT_GFX_CGCG | |
839 | AMD_CG_SUPPORT_GFX_CGLS | |
840 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
841 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
842 | AMD_CG_SUPPORT_GFX_CP_LS | |
843 | AMD_CG_SUPPORT_MC_LS | |
844 | AMD_CG_SUPPORT_MC_MGCG | |
845 | AMD_CG_SUPPORT_SDMA_MGCG | |
846 | AMD_CG_SUPPORT_SDMA_LS | |
847 | AMD_CG_SUPPORT_BIF_MGCG | |
848 | AMD_CG_SUPPORT_BIF_LS | |
849 | AMD_CG_SUPPORT_HDP_MGCG | |
850 | AMD_CG_SUPPORT_HDP_LS | |
851 | AMD_CG_SUPPORT_ROM_MGCG | |
852 | AMD_CG_SUPPORT_VCE_MGCG | |
853 | AMD_CG_SUPPORT_UVD_MGCG; |
854 | adev->pg_flags = 0; |
855 | adev->external_rev_id = adev->rev_id + 0x28; |
856 | break; |
857 | case CHIP_RAVEN: |
858 | adev->asic_funcs = &soc15_asic_funcs; |
859 | if (adev->rev_id >= 0x8) |
860 | adev->external_rev_id = adev->rev_id + 0x79; |
861 | else if (adev->pdev->device == 0x15d8) |
862 | adev->external_rev_id = adev->rev_id + 0x41; |
863 | else if (adev->rev_id == 1) |
864 | adev->external_rev_id = adev->rev_id + 0x20; |
865 | else |
866 | adev->external_rev_id = adev->rev_id + 0x01; |
867 | |
868 | if (adev->rev_id >= 0x8) { |
869 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
870 | AMD_CG_SUPPORT_GFX_MGLS | |
871 | AMD_CG_SUPPORT_GFX_CP_LS | |
872 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
873 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
874 | AMD_CG_SUPPORT_GFX_CGCG | |
875 | AMD_CG_SUPPORT_GFX_CGLS | |
876 | AMD_CG_SUPPORT_BIF_LS | |
877 | AMD_CG_SUPPORT_HDP_LS | |
878 | AMD_CG_SUPPORT_ROM_MGCG | |
879 | AMD_CG_SUPPORT_MC_MGCG | |
880 | AMD_CG_SUPPORT_MC_LS | |
881 | AMD_CG_SUPPORT_SDMA_MGCG | |
882 | AMD_CG_SUPPORT_SDMA_LS | |
883 | AMD_CG_SUPPORT_VCN_MGCG; |
884 | |
885 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
886 | } else if (adev->pdev->device == 0x15d8) { |
887 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | |
888 | AMD_CG_SUPPORT_GFX_CP_LS | |
889 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
890 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
891 | AMD_CG_SUPPORT_GFX_CGCG | |
892 | AMD_CG_SUPPORT_GFX_CGLS | |
893 | AMD_CG_SUPPORT_BIF_LS | |
894 | AMD_CG_SUPPORT_HDP_LS | |
895 | AMD_CG_SUPPORT_ROM_MGCG | |
896 | AMD_CG_SUPPORT_MC_MGCG | |
897 | AMD_CG_SUPPORT_MC_LS | |
898 | AMD_CG_SUPPORT_SDMA_MGCG | |
899 | AMD_CG_SUPPORT_SDMA_LS; |
900 | |
901 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | |
902 | AMD_PG_SUPPORT_MMHUB | |
903 | AMD_PG_SUPPORT_VCN | |
904 | AMD_PG_SUPPORT_VCN_DPG; |
905 | } else { |
906 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
907 | AMD_CG_SUPPORT_GFX_MGLS | |
908 | AMD_CG_SUPPORT_GFX_RLC_LS | |
909 | AMD_CG_SUPPORT_GFX_CP_LS | |
910 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
911 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
912 | AMD_CG_SUPPORT_GFX_CGCG | |
913 | AMD_CG_SUPPORT_GFX_CGLS | |
914 | AMD_CG_SUPPORT_BIF_MGCG | |
915 | AMD_CG_SUPPORT_BIF_LS | |
916 | AMD_CG_SUPPORT_HDP_MGCG | |
917 | AMD_CG_SUPPORT_HDP_LS | |
918 | AMD_CG_SUPPORT_DRM_MGCG | |
919 | AMD_CG_SUPPORT_DRM_LS | |
920 | AMD_CG_SUPPORT_ROM_MGCG | |
921 | AMD_CG_SUPPORT_MC_MGCG | |
922 | AMD_CG_SUPPORT_MC_LS | |
923 | AMD_CG_SUPPORT_SDMA_MGCG | |
924 | AMD_CG_SUPPORT_SDMA_LS | |
925 | AMD_CG_SUPPORT_VCN_MGCG; |
926 | |
927 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
928 | } |
929 | |
930 | if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) |
931 | adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | |
932 | AMD_PG_SUPPORT_CP | |
933 | AMD_PG_SUPPORT_RLC_SMU_HS; |
934 | break; |
935 | default: |
936 | /* FIXME: not supported yet */ |
937 | return -EINVAL; |
938 | } |
939 | |
940 | if (amdgpu_sriov_vf(adev)) { |
941 | amdgpu_virt_init_setting(adev); |
942 | xgpu_ai_mailbox_set_irq_funcs(adev); |
943 | } |
944 | |
945 | return 0; |
946 | } |
947 | |
948 | static int soc15_common_late_init(void *handle) |
949 | { |
950 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
951 | |
952 | if (amdgpu_sriov_vf(adev)) |
953 | xgpu_ai_mailbox_get_irq(adev); |
954 | |
955 | return 0; |
956 | } |
957 | |
958 | static int soc15_common_sw_init(void *handle) |
959 | { |
960 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
961 | |
962 | if (amdgpu_sriov_vf(adev)) |
963 | xgpu_ai_mailbox_add_irq_id(adev); |
964 | |
965 | return 0; |
966 | } |
967 | |
968 | static int soc15_common_sw_fini(void *handle) |
969 | { |
970 | return 0; |
971 | } |
972 | |
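/* Program the NBIO doorbell ranges for the SDMA instances and the IH
 * ring so their doorbell writes are routed to the right engine rather
 * than falling through to CP.
 */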
973 | static void soc15_doorbell_range_init(struct amdgpu_device *adev) |
974 | { |
975 | int i; |
976 | struct amdgpu_ring *ring; |
977 | |
978 | for (i = 0; i < adev->sdma.num_instances; i++) { |
979 | ring = &adev->sdma.instance[i].ring; |
980 | adev->nbio_funcs->sdma_doorbell_range(adev, i, |
981 | ring->use_doorbell, ring->doorbell_index, |
982 | adev->doorbell_index.sdma_doorbell_range); |
983 | } |
984 | |
985 | adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, |
986 | adev->irq.ih.doorbell_index); |
987 | } |
988 | |
989 | static int soc15_common_hw_init(void *handle) |
990 | { |
991 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
992 | |
993 | /* enable pcie gen2/3 link */ |
994 | soc15_pcie_gen3_enable(adev); |
995 | /* enable aspm */ |
996 | soc15_program_aspm(adev); |
997 | /* setup nbio registers */ |
998 | adev->nbio_funcs->init_registers(adev); |
999 | /* enable the doorbell aperture */ |
1000 | soc15_enable_doorbell_aperture(adev, true); |
	/* HW doorbell routing policy: doorbell writes that fall outside
	 * the SDMA/IH/MM/ACV ranges are routed to CP, so the SDMA/IH/MM/ACV
	 * doorbell ranges must be initialized before CP IP block init and
	 * its ring test.
	 */
1006 | soc15_doorbell_range_init(adev); |
1007 | |
1008 | return 0; |
1009 | } |
1010 | |
1011 | static int soc15_common_hw_fini(void *handle) |
1012 | { |
1013 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1014 | |
1015 | /* disable the doorbell aperture */ |
1016 | soc15_enable_doorbell_aperture(adev, false); |
1017 | if (amdgpu_sriov_vf(adev)) |
1018 | xgpu_ai_mailbox_put_irq(adev); |
1019 | |
1020 | return 0; |
1021 | } |
1022 | |
1023 | static int soc15_common_suspend(void *handle) |
1024 | { |
1025 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1026 | |
1027 | return soc15_common_hw_fini(adev); |
1028 | } |
1029 | |
1030 | static int soc15_common_resume(void *handle) |
1031 | { |
1032 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1033 | |
1034 | return soc15_common_hw_init(adev); |
1035 | } |
1036 | |
1037 | static bool soc15_common_is_idle(void *handle) |
1038 | { |
1039 | return true; |
1040 | } |
1041 | |
1042 | static int soc15_common_wait_for_idle(void *handle) |
1043 | { |
1044 | return 0; |
1045 | } |
1046 | |
1047 | static int soc15_common_soft_reset(void *handle) |
1048 | { |
1049 | return 0; |
1050 | } |
1051 | |
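/* On Vega20 the HDP light-sleep controls moved into the per-memory
 * HDP_MEM_POWER_CTRL register (hence the local defines above); earlier
 * SOC15 parts use the single LS_ENABLE bit in HDP_MEM_POWER_LS.
 */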
1052 | static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable) |
1053 | { |
1054 | uint32_t def, data; |
1055 | |
1056 | if (adev->asic_type == CHIP_VEGA20) { |
1057 | def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); |
1058 | |
1059 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
1060 | data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | |
1061 | HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | |
1062 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | |
1063 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; |
1064 | else |
1065 | data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | |
1066 | HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | |
1067 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | |
1068 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); |
1069 | |
1070 | if (def != data) |
1071 | WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); |
1072 | } else { |
1073 | def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); |
1074 | |
1075 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
1076 | data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; |
1077 | else |
1078 | data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; |
1079 | |
1080 | if (def != data) |
1081 | WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); |
1082 | } |
1083 | } |
1084 | |
1085 | static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) |
1086 | { |
1087 | uint32_t def, data; |
1088 | |
1089 | def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); |
1090 | |
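	/* The literal bits below appear to be per-client soft-override
	 * bits (there is no sh_mask header for MP0_MISC_CGTT_CTRL0);
	 * clearing them allows the MP0 clocks to be gated.
	 */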
1091 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG)) |
1092 | data &= ~(0x01000000 | |
1093 | 0x02000000 | |
1094 | 0x04000000 | |
1095 | 0x08000000 | |
1096 | 0x10000000 | |
1097 | 0x20000000 | |
1098 | 0x40000000 | |
1099 | 0x80000000); |
1100 | else |
1101 | data |= (0x01000000 | |
1102 | 0x02000000 | |
1103 | 0x04000000 | |
1104 | 0x08000000 | |
1105 | 0x10000000 | |
1106 | 0x20000000 | |
1107 | 0x40000000 | |
1108 | 0x80000000); |
1109 | |
1110 | if (def != data) |
1111 | WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data); |
1112 | } |
1113 | |
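/* Bit 0 of MP0_MISC_LIGHT_SLEEP_CTRL (again, no sh_mask header for this
 * register, hence the raw bit) enables MP0 memory light sleep.
 */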
1114 | static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable) |
1115 | { |
1116 | uint32_t def, data; |
1117 | |
1118 | def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); |
1119 | |
1120 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS)) |
1121 | data |= 1; |
1122 | else |
1123 | data &= ~1; |
1124 | |
1125 | if (def != data) |
1126 | WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data); |
1127 | } |
1128 | |
1129 | static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, |
1130 | bool enable) |
1131 | { |
1132 | uint32_t def, data; |
1133 | |
1134 | def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0)); |
1135 | |
1136 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) |
1137 | data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | |
1138 | CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); |
1139 | else |
1140 | data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | |
1141 | CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; |
1142 | |
1143 | if (def != data) |
1144 | WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data); |
1145 | } |
1146 | |
1147 | static int soc15_common_set_clockgating_state(void *handle, |
1148 | enum amd_clockgating_state state) |
1149 | { |
1150 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1151 | |
1152 | if (amdgpu_sriov_vf(adev)) |
1153 | return 0; |
1154 | |
1155 | switch (adev->asic_type) { |
1156 | case CHIP_VEGA10: |
1157 | case CHIP_VEGA12: |
1158 | case CHIP_VEGA20: |
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
1173 | break; |
1174 | case CHIP_RAVEN: |
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
1187 | break; |
1188 | default: |
1189 | break; |
1190 | } |
1191 | return 0; |
1192 | } |
1193 | |
1194 | static void soc15_common_get_clockgating_state(void *handle, u32 *flags) |
1195 | { |
1196 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1197 | int data; |
1198 | |
1199 | if (amdgpu_sriov_vf(adev)) |
1200 | *flags = 0; |
1201 | |
1202 | adev->nbio_funcs->get_clockgating_state(adev, flags); |
1203 | |
1204 | /* AMD_CG_SUPPORT_HDP_LS */ |
1205 | data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); |
1206 | if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) |
1207 | *flags |= AMD_CG_SUPPORT_HDP_LS; |
1208 | |
1209 | /* AMD_CG_SUPPORT_DRM_MGCG */ |
1210 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); |
1211 | if (!(data & 0x01000000)) |
1212 | *flags |= AMD_CG_SUPPORT_DRM_MGCG; |
1213 | |
1214 | /* AMD_CG_SUPPORT_DRM_LS */ |
1215 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); |
1216 | if (data & 0x1) |
1217 | *flags |= AMD_CG_SUPPORT_DRM_LS; |
1218 | |
1219 | /* AMD_CG_SUPPORT_ROM_MGCG */ |
1220 | data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0)); |
1221 | if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) |
1222 | *flags |= AMD_CG_SUPPORT_ROM_MGCG; |
1223 | |
1224 | adev->df_funcs->get_clockgating_state(adev, flags); |
1225 | } |
1226 | |
1227 | static int soc15_common_set_powergating_state(void *handle, |
1228 | enum amd_powergating_state state) |
1229 | { |
1230 | /* todo */ |
1231 | return 0; |
1232 | } |
1233 | |
1234 | const struct amd_ip_funcs soc15_common_ip_funcs = { |
	.name = "soc15_common",
1236 | .early_init = soc15_common_early_init, |
1237 | .late_init = soc15_common_late_init, |
1238 | .sw_init = soc15_common_sw_init, |
1239 | .sw_fini = soc15_common_sw_fini, |
1240 | .hw_init = soc15_common_hw_init, |
1241 | .hw_fini = soc15_common_hw_fini, |
1242 | .suspend = soc15_common_suspend, |
1243 | .resume = soc15_common_resume, |
1244 | .is_idle = soc15_common_is_idle, |
1245 | .wait_for_idle = soc15_common_wait_for_idle, |
1246 | .soft_reset = soc15_common_soft_reset, |
1247 | .set_clockgating_state = soc15_common_set_clockgating_state, |
1248 | .set_powergating_state = soc15_common_set_powergating_state, |
	.get_clockgating_state = soc15_common_get_clockgating_state,
1250 | }; |
1251 | |