// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_irq.h"

/**
 * DOC: amdgpu_vkms
 *
 * The amdgpu vkms interface provides a virtual KMS interface for several use
 * cases: devices without display hardware, platforms where the actual display
 * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
 * emulation/simulation, and device bring-up prior to display hardware being
 * usable. We previously emulated a legacy KMS interface, but there was a desire
 * to move to the atomic KMS interface. The vkms driver did everything we
 * needed, but we wanted KMS support natively in the driver without buffer
 * sharing and the ability to support an instance of VKMS per device. We first
 * looked at splitting vkms into a stub driver and a helper module that other
 * drivers could use to implement a virtual display, but this strategy ended up
 * being messy due to driver-specific callbacks needed for buffer management.
 * Ultimately, it proved easier to import the vkms code as it mostly used core
 * drm helpers anyway.
 */

static const u32 amdgpu_vkms_formats[] = {
	DRM_FORMAT_XRGB8888,
};

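/*
 * hrtimer callback that stands in for the hardware vblank interrupt.
 * Each expiry advances the timer by one frame period and hands the
 * "interrupt" to the DRM vblank core; the timer is only re-armed while
 * vblank stays enabled.
 */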
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	u64 ret_overrun;
	bool ret;

	ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
					  output->period_ns);
	if (ret_overrun != 1)
		DRM_WARN("%s: vblank timer overrun\n", __func__);

	ret = drm_crtc_handle_vblank(crtc);
	/* Don't queue timer again when vblank is disabled. */
	if (!ret)
		return HRTIMER_NORESTART;

	return HRTIMER_RESTART;
}

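/*
 * Start software vblank emulation: derive the frame period from the
 * current mode and arm the per-CRTC hrtimer with it.
 */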
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}

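/*
 * Report a vblank timestamp to the DRM core. With no hardware to query,
 * the next hrtimer expiry is used, corrected back by one frame period
 * (see the comment in the body); while vblank is disabled the current
 * time is returned as a fallback.
 */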
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
					     int *max_error,
					     ktime_t *vblank_time,
					     bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = amdgpu_vkms_enable_vblank,
	.disable_vblank = amdgpu_vkms_disable_vblank,
	.get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
};

static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

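/*
 * Complete a commit on this CRTC: if the new state carries a flip
 * event, either arm it on the next simulated vblank or, when a vblank
 * reference cannot be taken, send it immediately.
 */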
static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	unsigned long flags;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
	.atomic_flush = amdgpu_vkms_crtc_atomic_flush,
	.atomic_enable = amdgpu_vkms_crtc_atomic_enable,
	.atomic_disable = amdgpu_vkms_crtc_atomic_disable,
};

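/*
 * Register a CRTC with the core, hook up the helper funcs above, and
 * initialize the hrtimer that simulates its vblank interrupt.
 */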
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
				 struct drm_plane *primary, struct drm_plane *cursor)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&amdgpu_vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

	amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
	adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;

	hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;

	return ret;
}

static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

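/*
 * Populate the virtual connector with a fixed list of common modes,
 * each generated as a 60 Hz CVT timing, and mark the default
 * resolution as preferred.
 */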
static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	unsigned int i;
	static const struct mode_size {
		int w;
		int h;
	} common_modes[] = {
		{ 640, 480},
		{ 720, 480},
		{ 800, 600},
		{ 848, 480},
		{1024, 768},
		{1152, 768},
		{1280, 720},
		{1280, 800},
		{1280, 854},
		{1280, 960},
		{1280, 1024},
		{1440, 900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200},
		{2560, 1440},
		{4096, 3112},
		{3656, 2664},
		{3840, 2160},
		{4096, 2160},
	};

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
	}

	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

	return ARRAY_SIZE(common_modes);
}

static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
	.get_modes = amdgpu_vkms_conn_get_modes,
};

static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

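/* Nothing is scanned out on a virtual display, so the update is a no-op. */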
static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
					    struct drm_atomic_state *old_state)
{
	return;
}

static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);
	if (ret != 0)
		return ret;

	/* for now primary plane must be visible and full screen */
	if (!new_plane_state->visible)
		return -EINVAL;

	return 0;
}

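/*
 * Pin the framebuffer BO in a scanout-capable domain and record its GPU
 * address ahead of the commit, taking an extra BO reference that
 * cleanup_fb drops again.
 */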
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}
	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

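/*
 * Undo prepare_fb: unpin the old framebuffer's BO and drop the
 * reference taken when it was prepared.
 */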
static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
	.atomic_update = amdgpu_vkms_plane_atomic_update,
	.atomic_check = amdgpu_vkms_plane_atomic_check,
	.prepare_fb = amdgpu_vkms_prepare_fb,
	.cleanup_fb = amdgpu_vkms_cleanup_fb,
};

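/*
 * Allocate and register one plane of the given type, usable only with
 * the CRTC at @index and supporting only the XRGB8888 format above.
 */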
static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
						enum drm_plane_type type,
						int index)
{
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &amdgpu_vkms_plane_funcs,
				       amdgpu_vkms_formats,
				       ARRAY_SIZE(amdgpu_vkms_formats),
				       NULL, type, NULL);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}

	drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);

	return plane;
}

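/*
 * Build one complete virtual display pipe: primary plane -> CRTC ->
 * virtual encoder -> virtual connector, unwinding in reverse order on
 * failure.
 */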
static int amdgpu_vkms_output_init(struct drm_device *dev,
				   struct amdgpu_vkms_output *output, int index)
{
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc.base;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	encoder->possible_crtcs = 1 << index;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	drm_plane_cleanup(primary);

	return ret;
}

const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

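/*
 * Software init for the IP block: set up the atomic mode config limits,
 * create one virtual output per CRTC, and initialize vblank support.
 * max_vblank_count is left at 0 since there is no hardware counter, so
 * the DRM core falls back to timestamp-based vblank counting.
 */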
static int amdgpu_vkms_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
					   sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
	if (!adev->amdgpu_vkms_output)
		return -ENOMEM;

	adev_to_drm(adev)->max_vblank_count = 0;

	adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

	adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
		if (r)
			return r;
	}

	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

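/*
 * Tear down in reverse: cancel any still-armed vblank timers before the
 * mode config (and with it the CRTCs) is destroyed, then free the
 * per-device output array.
 */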
static int amdgpu_vkms_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	drm_kms_helper_poll_fini(adev_to_drm(adev));
	drm_mode_config_cleanup(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = false;

	kfree(adev->mode_info.bios_hardcoded_edid);
	kfree(adev->amdgpu_vkms_output);
	return 0;
}

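/*
 * Hardware init only needs to make sure any real display engine (DCE)
 * present on the ASIC is disabled so the virtual display can take its
 * place; ASICs without DCE fall through with nothing to do.
 */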
static int amdgpu_vkms_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_VEGAM:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		break;
	default:
		break;
	}
	return 0;
}

static int amdgpu_vkms_hw_fini(void *handle)
{
	return 0;
}

static int amdgpu_vkms_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = drm_mode_config_helper_suspend(adev_to_drm(adev));
	if (r)
		return r;
	return amdgpu_vkms_hw_fini(handle);
}

static int amdgpu_vkms_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vkms_hw_init(handle);
	if (r)
		return r;
	return drm_mode_config_helper_resume(adev_to_drm(adev));
}

static bool amdgpu_vkms_is_idle(void *handle)
{
	return true;
}

static int amdgpu_vkms_wait_for_idle(void *handle)
{
	return 0;
}

static int amdgpu_vkms_soft_reset(void *handle)
{
	return 0;
}

static int amdgpu_vkms_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
{
	return 0;
}

static int amdgpu_vkms_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
	.name = "amdgpu_vkms",
	.early_init = NULL,
	.late_init = NULL,
	.sw_init = amdgpu_vkms_sw_init,
	.sw_fini = amdgpu_vkms_sw_fini,
	.hw_init = amdgpu_vkms_hw_init,
	.hw_fini = amdgpu_vkms_hw_fini,
	.suspend = amdgpu_vkms_suspend,
	.resume = amdgpu_vkms_resume,
	.is_idle = amdgpu_vkms_is_idle,
	.wait_for_idle = amdgpu_vkms_wait_for_idle,
	.soft_reset = amdgpu_vkms_soft_reset,
	.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
	.set_powergating_state = amdgpu_vkms_set_powergating_state,
};

const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_vkms_ip_funcs,
};