/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"

#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"

#include "soc15_common.h"
#include "vega10_ih.h"

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		adev->irq.ih1.enabled = true;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		adev->irq.ih2.enabled = true;
	}
}

/**
 * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		adev->irq.ih1.enabled = false;
		adev->irq.ih1.rptr = 0;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		adev->irq.ih2.enabled = false;
		adev->irq.ih2.rptr = 0;
	}
}

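/* Fill in the IH_RB_CNTL fields common to all three IH rings: RB_SIZE is
 * log2 of the ring size in dwords, write pointer overflow handling and
 * write pointer writeback are enabled, and the buffer is marked snooped
 * and writable (MC_SNOOP = 1, MC_RO = 0).
 */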
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

/**
 * vega10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (VEGA10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih;
	int ret = 0;
	u32 ih_rb_cntl, ih_doorbell_rtpr;
	u32 tmp;

	/* disable irqs */
	vega10_ih_disable_interrupts(adev);

	adev->nbio_funcs->ih_control(adev);

	ih = &adev->irq.ih;
	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
				   !!adev->irq.msi_enabled);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
		     lower_32_bits(ih->wptr_addr));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
		     upper_32_bits(ih->wptr_addr) & 0xFFFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
	if (adev->irq.ih.use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 adev->irq.ih.doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);

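	/* IH rings 1 and 2 are programmed the same way as the main ring;
	 * their write pointer updates reach the driver as IVs on the main
	 * ring via the self-interrupt source (see vega10_ih_self_irq)
	 * rather than through doorbells.
	 */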
	ih = &adev->irq.ih1;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
	}

	ih = &adev->irq.ih2;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
	}

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	vega10_ih_enable_interrupts(adev);

	return ret;
}

/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10).
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr for
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

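	/* WPTR_WRITEBACK_ENABLE is set in vega10_ih_rb_cntl(), so the
	 * current write pointer can be read from the writeback buffer in
	 * memory; the MMIO register is only consulted below to double
	 * check an overflow indication.
	 */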
	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */
	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	else
		BUG();

	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	else
		BUG();

	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);

out:
	return (wptr & ih->ptr_mask);
}

/**
 * vega10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode the IV from
 * @entry: IV entry to fill with the decoded data
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void vega10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[8];

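	/* Each IV is 8 dwords (32 bytes); copy it out of the ring buffer
	 * before unpacking the individual fields below.
	 */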
	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	ih->rptr += 32;
}

/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr for
 *
 * Set the IH ring buffer rptr.
 */
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);
	} else if (ih == &adev->irq.ih) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	} else if (ih == &adev->irq.ih1) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
	} else if (ih == &adev->irq.ih2) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
	}
}

/**
 * vega10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int vega10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

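	/* entry->ring_id identifies which auxiliary IH ring produced the
	 * write pointer update; stash the new wptr in that ring's
	 * writeback location and let the corresponding work item drain it.
	 */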
	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
	.process = vega10_ih_self_irq,
};

static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}

static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_set_interrupt_funcs(adev);
	vega10_ih_set_self_irq_funcs(adev);
	return 0;
}

static int vega10_ih_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);
	if (r)
		return r;

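	/* All IH rings live in system memory and are addressed over the
	 * bus (use_bus_addr == true): a 256KB main ring, plus two
	 * page-sized auxiliary rings on Vega10 itself.
	 */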
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
	if (r)
		return r;

	if (adev->asic_type == CHIP_VEGA10) {
		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
		if (r)
			return r;

		r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
		if (r)
			return r;
	}

	/* TODO add doorbell for IH1 & IH2 as well */
	adev->irq.ih.use_doorbell = true;
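	/* Convert the assigned doorbell index (in qword units) to the
	 * dword offset programmed into IH_DOORBELL_RPTR.
	 */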
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_irq_init(adev);

	return r;
}

static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int vega10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vega10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}

static int vega10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_fini(adev);
}

static int vega10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_init(adev);
}

static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int vega10_ih_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static int vega10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int vega10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &vega10_ih_funcs;
}

const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};