// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

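/*
 * FIFO command layout used by vmw_send_define_cursor_cmd() below: the
 * 32-bit command id (SVGA_CMD_DEFINE_ALPHA_CURSOR) followed by the
 * fixed-size command body. The ARGB pixel data is copied into the FIFO
 * immediately after this header (see the memcpy(&cmd[1], ...) there).
 */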
struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so
	 * we treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);
	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}


/**
 * vmw_cursor_update_mob - Update cursor via CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor-image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot X
 * @hotspotY: cursor hotspot Y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}


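/*
 * Size in bytes of the CursorMob backing a w x h cursor: the fixed
 * SVGAGBCursorHeader followed by 32-bit ARGB pixels. A 64x64 cursor,
 * for example, needs 64 * 64 * 4 = 16384 bytes plus the header.
 */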
static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}

/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	if (vps->surf) {
		if (vps->surf_mapped)
			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		return vps->surf->snooper.image;
	} else if (vps->bo)
		return vmw_bo_map_and_cache(vps->bo);
	return NULL;
}

static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

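/*
 * vmw_du_put_cursor_mob - Return a cursor mob to the plane's small
 * per-plane cache so a later update of the same (or smaller) size can
 * reuse it without reallocating; a mob that neither fits in the cache
 * nor is larger than a cached entry is destroyed outright.
 */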
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}

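/*
 * vmw_du_get_cursor_mob - Acquire a cursor mob large enough for the
 * plane's current cursor: reuse the mob already attached to the plane
 * state or one from the per-plane cache when possible, otherwise
 * allocate and populate a new MOB-backed buffer object and fence the
 * creation so the mob is guaranteed to exist before it is used.
 */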
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);
	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (ret != 0) {
		ttm_bo_unreserve(&vps->cursor.bo->tbo);
		goto teardown;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}


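/*
 * vmw_cursor_update_position - Program the host cursor position and
 * visibility using whichever mechanism the device exposes: the CURSOR4
 * extra registers when SVGA_CAP2_EXTRA_REGS is present, the FIFO
 * cursor-bypass-3 fields when enabled, or the legacy SVGA cursor
 * registers otherwise.
 */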
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

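/*
 * vmw_kms_cursor_snoop - Snoop a surface DMA command that targets a
 * cursor surface and copy the transferred image into the surface's
 * snooper buffer, so the driver can later redefine the host cursor
 * from it. Only page-aligned, single-box copies with zero source and
 * destination offsets are handled; anything else is rejected with an
 * error.
 */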
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
		sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

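/*
 * vmw_kms_cursor_post_execbuf - After command submission, re-emit the
 * define-cursor command for any display unit whose snooped cursor
 * surface has aged, i.e. whose image was updated by the just-executed
 * command stream.
 */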
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


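/*
 * vmw_du_cursor_plane_destroy - Hide the cursor, release all cached
 * cursor mobs and clean up the plane.
 */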
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0))
		return -ENOMEM;

	return 0;
}


/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	if (vps->surf_mapped) {
		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
		vps->surf_mapped = false;
	}

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	int ret = 0;

	if (vps->surf) {
		if (vps->surf_mapped) {
			vmw_bo_unmap(vps->surf->res.guest_memory_bo);
			vps->surf_mapped = false;
		}
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	if (!vps->surf && vps->bo) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using vmw_bo_map_and_cache() helper here as we need to
		 * reserve the ttm_buffer_object first which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

		ttm_bo_unreserve(&vps->bo->tbo);

		if (unlikely(ret != 0))
			return -ENOMEM;
	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
		WARN_ON(vps->surf->snooper.image);
		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
				     NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;
		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
		vps->surf_mapped = true;
	}

	if (vps->surf || vps->bo) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}


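/*
 * vmw_du_cursor_plane_atomic_update - Push the new cursor state to the
 * device: update the snooper age, upload a new cursor image only when
 * it actually changed (otherwise keep the previously defined cursor by
 * swapping the cached cursor state), and reprogram position and
 * visibility.
 */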
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x + new_state->hotspot_x;
	hotspot_y = du->hotspot_y + new_state->hotspot_y;

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (!vps->surf && !vps->bo) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;
	}

	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;
		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);
		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;
	int ret;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_fb != new_fb)
		new_state->ignore_damage_clips = true;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}


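/*
 * vmw_du_crtc_atomic_check - Validate a new crtc state: a crtc may only
 * be driven by its own connector, and since the virtual device has no
 * dot clock, the logical mode clock is used as crtc_clock so the
 * timestamping constants can still be computed.
 */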
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/*
	 * This is fine in general, but broken userspace might expect
	 * some actual rendering so give a clue as to why it's blank.
	 */
	if (new_state->enable && !has_primary)
		drm_dbg_driver(&vmw->drm,
			       "CRTC without a primary plane will be blank.\n");

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA. This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_bo *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_user_bo_unref(&res->guest_memory_bo);
	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}



static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surface dimensions must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_bo *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_bo *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_user_bo_unref(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
				(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * limit on primary bounding box.
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed so don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}
1935
/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that additionally
 * validates the vmwgfx-specific implicit-placement constraints and, when a
 * modeset is required, the resulting display topology.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
                             struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        bool need_modeset = false;
        int i, ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        ret = vmw_kms_check_implicit(dev, state);
        if (ret) {
                VMW_DEBUG_KMS("Invalid implicit state\n");
                return ret;
        }

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        need_modeset = true;
        }

        if (need_modeset)
                return vmw_kms_check_topology(dev, state);

        return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
        .fb_create = vmw_kms_fb_create,
        .atomic_check = vmw_kms_atomic_check_modeset,
        .atomic_commit = drm_atomic_helper_commit,
};

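/**
 * vmw_kms_generic_present - present a surface using the Screen Object backend
 * @dev_priv: Pointer to a device private struct.
 * @file_priv: DRM file identifying the caller; unused by this backend.
 * @vfb: Framebuffer to present on.
 * @surface: Surface holding the source image.
 * @sid: Surface id of @surface; unused by this backend.
 * @destX: X offset of the destination rectangle.
 * @destY: Y offset of the destination rectangle.
 * @clips: Array of clip rects in source coordinates.
 * @num_clips: Number of clip rects in @clips.
 *
 * Thin wrapper forwarding the present to the Screen Object surface-dirty
 * helper. Returns 0 on success or a negative error code on failure.
 */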
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
                                   struct drm_file *file_priv,
                                   struct vmw_framebuffer *vfb,
                                   struct vmw_surface *surface,
                                   uint32_t sid,
                                   int32_t destX, int32_t destY,
                                   struct drm_vmw_rect *clips,
                                   uint32_t num_clips)
{
        return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
                                            &surface->res, destX, destY,
                                            num_clips, 1, NULL, NULL);
}

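/**
 * vmw_kms_present - present a surface to the guest display
 * @dev_priv: Pointer to a device private struct.
 * @file_priv: DRM file identifying the caller.
 * @vfb: Framebuffer to present on.
 * @surface: Surface holding the source image.
 * @sid: Surface id of @surface.
 * @destX: X offset of the destination rectangle.
 * @destY: Y offset of the destination rectangle.
 * @clips: Array of clip rects in source coordinates.
 * @num_clips: Number of clip rects in @clips.
 *
 * Dispatches to the backend matching the active display unit (Screen Target
 * or Screen Object) and flushes the command buffer. Display units without a
 * present backend trigger a one-time warning and return -ENOSYS.
 *
 * Returns 0 on success or a negative error code on failure.
 */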
int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
                    struct vmw_surface *surface,
                    uint32_t sid,
                    int32_t destX, int32_t destY,
                    struct drm_vmw_rect *clips,
                    uint32_t num_clips)
{
        int ret;

        switch (dev_priv->active_display_unit) {
        case vmw_du_screen_target:
                ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
                                                 &surface->res, destX, destY,
                                                 num_clips, 1, NULL, NULL);
                break;
        case vmw_du_screen_object:
                ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
                                              sid, destX, destY, clips,
                                              num_clips);
                break;
        default:
                WARN_ONCE(true,
                          "Present called with invalid display system.\n");
                ret = -ENOSYS;
                break;
        }
        if (ret)
                return ret;

        vmw_cmd_flush(dev_priv, false);

        return 0;
}

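/**
 * vmw_kms_create_hotplug_mode_update_property - create the immutable
 * "hotplug_mode_update" connector property, unless it already exists.
 * @dev_priv: Pointer to a device private struct.
 */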
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
        if (dev_priv->hotplug_mode_update_property)
                return;

        dev_priv->hotplug_mode_update_property =
                drm_property_create_range(&dev_priv->drm,
                                          DRM_MODE_PROP_IMMUTABLE,
                                          "hotplug_mode_update", 0, 1);
}

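/**
 * vmw_kms_init - initialize the vmwgfx modesetting core
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the DRM mode config limits and properties, then tries the display
 * unit backends in order of preference: Screen Target, Screen Object and
 * finally Legacy.
 *
 * Returns 0 on success or a negative error code on failure.
 */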
int vmw_kms_init(struct vmw_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        int ret;
        static const char *display_unit_names[] = {
                "Invalid",
                "Legacy",
                "Screen Object",
                "Screen Target",
                "Invalid (max)"
        };

        drm_mode_config_init(dev);
        dev->mode_config.funcs = &vmw_kms_funcs;
        dev->mode_config.min_width = 1;
        dev->mode_config.min_height = 1;
        dev->mode_config.max_width = dev_priv->texture_max_width;
        dev->mode_config.max_height = dev_priv->texture_max_height;
        dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;

        drm_mode_create_suggested_offset_properties(dev);
        vmw_kms_create_hotplug_mode_update_property(dev_priv);

        ret = vmw_kms_stdu_init_display(dev_priv);
        if (ret) {
                ret = vmw_kms_sou_init_display(dev_priv);
                if (ret) /* Fallback */
                        ret = vmw_kms_ldu_init_display(dev_priv);
        }
        BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
        drm_info(&dev_priv->drm, "%s display unit initialized\n",
                 display_unit_names[dev_priv->active_display_unit]);

        return ret;
}

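/**
 * vmw_kms_close - tear down the vmwgfx modesetting core
 * @dev_priv: Pointer to a device private struct.
 *
 * Returns 0 on success or a negative error code on failure.
 */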
int vmw_kms_close(struct vmw_private *dev_priv)
{
        int ret = 0;

        /*
         * Docs say we should take the lock before calling this function, but
         * since it destroys encoders, and our destructor calls
         * drm_encoder_cleanup() which takes the lock, we would deadlock.
         */
        drm_mode_config_cleanup(&dev_priv->drm);
        if (dev_priv->active_display_unit == vmw_du_legacy)
                ret = vmw_kms_ldu_close_display(dev_priv);

        return ret;
}

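/**
 * vmw_kms_cursor_bypass_ioctl - Handler for DRM_VMW_CURSOR_BYPASS ioctl
 * @dev: drm device for the ioctl
 * @data: Pointer to a struct drm_vmw_cursor_bypass_arg.
 * @file_priv: drm file for the ioctl call
 *
 * Updates the cursor hotspot of a single display unit, or of all of them
 * when DRM_VMW_CURSOR_BYPASS_ALL is set in the argument flags.
 *
 * Returns 0 on success, -ENOENT if the crtc id cannot be looked up.
 */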
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_vmw_cursor_bypass_arg *arg = data;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;
        int ret = 0;

        mutex_lock(&dev->mode_config.mutex);
        if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        du = vmw_crtc_to_du(crtc);
                        du->hotspot_x = arg->xhot;
                        du->hotspot_y = arg->yhot;
                }

                mutex_unlock(&dev->mode_config.mutex);
                return 0;
        }

        crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
        if (!crtc) {
                ret = -ENOENT;
                goto out;
        }

        du = vmw_crtc_to_du(crtc);

        du->hotspot_x = arg->xhot;
        du->hotspot_y = arg->yhot;

out:
        mutex_unlock(&dev->mode_config.mutex);

        return ret;
}

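/**
 * vmw_kms_write_svga - program the host with a new display geometry
 * @vmw_priv: Pointer to a device private struct.
 * @width: Display width in pixels.
 * @height: Display height in pixels.
 * @pitch: Scanline pitch in bytes.
 * @bpp: Bits per pixel.
 * @depth: Color depth, verified against what the host reports back.
 *
 * Returns 0 on success, -EINVAL if the host disagrees about the depth that
 * corresponds to the requested bpp.
 */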
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth)
{
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
        vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
        if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
                vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

        if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
                DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
                          depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
                return -EINVAL;
        }

        return 0;
}

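/**
 * vmw_kms_validate_mode_vram - check that a mode's framebuffer fits in memory
 * @dev_priv: Pointer to a device private struct.
 * @pitch: Scanline pitch in bytes.
 * @height: Mode height in lines.
 *
 * Returns true if pitch * height fits within the primary surface memory for
 * Screen Target display units, or within VRAM for the other backends.
 */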
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height)
{
        return ((u64) pitch * (u64) height) < (u64)
                ((dev_priv->active_display_unit == vmw_du_screen_target) ?
                 dev_priv->max_primary_mem : dev_priv->vram_size);
}

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 *
 * Returns:
 * 0 on success.
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
                                unsigned int num_rects, struct drm_rect *rects)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_display_unit *du;
        struct drm_connector *con;
        struct drm_connector_list_iter conn_iter;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_crtc *crtc;
        int ret;

        /* Currently gui_x/y is protected with the crtc mutex */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);
retry:
        drm_for_each_crtc(crtc, dev) {
                ret = drm_modeset_lock(&crtc->mutex, &ctx);
                if (ret < 0) {
                        if (ret == -EDEADLK) {
                                drm_modeset_backoff(&ctx);
                                goto retry;
                        }
                        goto out_fini;
                }
        }

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(con, &conn_iter) {
                du = vmw_connector_to_du(con);
                if (num_rects > du->unit) {
                        du->pref_width = drm_rect_width(&rects[du->unit]);
                        du->pref_height = drm_rect_height(&rects[du->unit]);
                        du->pref_active = true;
                        du->gui_x = rects[du->unit].x1;
                        du->gui_y = rects[du->unit].y1;
                } else {
                        du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
                        du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
                        du->pref_active = false;
                        du->gui_x = 0;
                        du->gui_y = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        list_for_each_entry(con, &dev->mode_config.connector_list, head) {
                du = vmw_connector_to_du(con);
                if (num_rects > du->unit) {
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_x_property,
                                 du->gui_x);
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_y_property,
                                 du->gui_y);
                } else {
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_x_property,
                                 0);
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_y_property,
                                 0);
                }
                con->status = vmw_du_connector_detect(con, true);
        }
out_fini:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);

        drm_sysfs_hotplug_event(dev);

        return 0;
}

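/**
 * vmw_du_crtc_gamma_set - implements drm_crtc_funcs.gamma_set
 * @crtc: The crtc to set the gamma ramp for.
 * @r: Red component lookup table, 16 bits per entry.
 * @g: Green component lookup table, 16 bits per entry.
 * @b: Blue component lookup table, 16 bits per entry.
 * @size: Number of entries in the lookup tables.
 * @ctx: Modeset acquire context; unused here.
 *
 * Writes the upper 8 bits of each component to the device palette registers.
 * Always returns 0.
 */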
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                          u16 *r, u16 *g, u16 *b,
                          uint32_t size,
                          struct drm_modeset_acquire_ctx *ctx)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        int i;

        for (i = 0; i < size; i++) {
                DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
                          r[i], g[i], b[i]);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
        }

        return 0;
}

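/**
 * vmw_du_connector_dpms - implements drm_connector_funcs.dpms
 * @connector: The connector.
 * @mode: The requested DPMS mode; ignored.
 *
 * No-op on vmwgfx; always returns 0.
 */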
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
        return 0;
}

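/**
 * vmw_du_connector_detect - implements drm_connector_funcs.detect
 * @connector: The connector to detect.
 * @force: Whether to force-probe; ignored, detection is register based.
 *
 * A connector reads as connected when its unit number is below the number of
 * displays the host reports and the unit is marked preferred-active.
 */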
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
        uint32_t num_displays;
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_display_unit *du = vmw_connector_to_du(connector);

        num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

        return ((du->unit < num_displays && du->pref_active) ?
                connector_status_connected : connector_status_disconnected);
}


/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
        mode->hsync_start = mode->hdisplay + 50;
        mode->hsync_end = mode->hsync_start + 50;
        mode->htotal = mode->hsync_end + 50;

        mode->vsync_start = mode->vdisplay + 50;
        mode->vsync_end = mode->vsync_start + 50;
        mode->vtotal = mode->vsync_end + 50;

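        /*
         * Pixel clock in kHz for a 60Hz refresh rate:
         * htotal * vtotal * 60 / 1000, computed as htotal * vtotal / 100 * 6.
         */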
        mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. A topology beyond these limits returns an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_vmw_update_layout_arg *arg =
                (struct drm_vmw_update_layout_arg *)data;
        void __user *user_rects;
        struct drm_vmw_rect *rects;
        struct drm_rect *drm_rects;
        unsigned rects_size;
        int ret, i;

        if (!arg->num_outputs) {
                struct drm_rect def_rect = {0, 0,
                                            VMWGFX_MIN_INITIAL_WIDTH,
                                            VMWGFX_MIN_INITIAL_HEIGHT};
                vmw_du_update_layout(dev_priv, 1, &def_rect);
                return 0;
        }

        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
        rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
                        GFP_KERNEL);
        if (unlikely(!rects))
                return -ENOMEM;

        user_rects = (void __user *)(unsigned long)arg->rects;
        ret = copy_from_user(rects, user_rects, rects_size);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to get rects.\n");
                ret = -EFAULT;
                goto out_free;
        }

        drm_rects = (struct drm_rect *)rects;

        VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
        for (i = 0; i < arg->num_outputs; i++) {
                struct drm_vmw_rect curr_rect;

                /* Verify the user-space rects for overflow, as the kernel uses drm_rect */
                if ((rects[i].x + rects[i].w > INT_MAX) ||
                    (rects[i].y + rects[i].h > INT_MAX)) {
                        ret = -ERANGE;
                        goto out_free;
                }

                curr_rect = rects[i];
                drm_rects[i].x1 = curr_rect.x;
                drm_rects[i].y1 = curr_rect.y;
                drm_rects[i].x2 = curr_rect.x + curr_rect.w;
                drm_rects[i].y2 = curr_rect.y + curr_rect.h;

                VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
                              drm_rects[i].x1, drm_rects[i].y1,
                              drm_rects[i].x2, drm_rects[i].y2);

                /*
                 * Currently this check limits the topology to
                 * mode_config->max_* (which actually is the maximum texture
                 * size supported by the virtual device). The limit is here to
                 * accommodate window managers that create one big framebuffer
                 * spanning the whole topology.
                 */
                if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
                    drm_rects[i].x2 > mode_config->max_width ||
                    drm_rects[i].y2 > mode_config->max_height) {
                        VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
                                      drm_rects[i].x1, drm_rects[i].y1,
                                      drm_rects[i].x2, drm_rects[i].y2);
                        ret = -EINVAL;
                        goto out_free;
                }
        }

        ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

        if (ret == 0)
                vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
        kfree(rects);
        return ret;
}


/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         struct vmw_framebuffer *framebuffer,
                         const struct drm_clip_rect *clips,
                         const struct drm_vmw_rect *vclips,
                         s32 dest_x, s32 dest_y,
                         int num_clips,
                         int increment,
                         struct vmw_kms_dirty *dirty)
{
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
        struct drm_crtc *crtc;
        u32 num_units = 0;
        u32 i, k;

        dirty->dev_priv = dev_priv;

        /* If crtc is passed, no need to iterate over other display units */
        if (dirty->crtc) {
                units[num_units++] = vmw_crtc_to_du(dirty->crtc);
        } else {
                list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
                                    head) {
                        struct drm_plane *plane = crtc->primary;

                        if (plane->state->fb == &framebuffer->base)
                                units[num_units++] = vmw_crtc_to_du(crtc);
                }
        }

        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
                s32 crtc_x = unit->crtc.x;
                s32 crtc_y = unit->crtc.y;
                s32 crtc_width = unit->crtc.mode.hdisplay;
                s32 crtc_height = unit->crtc.mode.vdisplay;
                const struct drm_clip_rect *clips_ptr = clips;
                const struct drm_vmw_rect *vclips_ptr = vclips;

                dirty->unit = unit;
                if (dirty->fifo_reserve_size > 0) {
                        dirty->cmd = VMW_CMD_RESERVE(dev_priv,
                                                     dirty->fifo_reserve_size);
                        if (!dirty->cmd)
                                return -ENOMEM;

                        memset(dirty->cmd, 0, dirty->fifo_reserve_size);
                }
                dirty->num_hits = 0;
                for (i = 0; i < num_clips; i++, clips_ptr += increment,
                     vclips_ptr += increment) {
                        s32 clip_left;
                        s32 clip_top;

                        /*
                         * Select clip array type. Note that integer type
                         * in @clips is unsigned short, whereas in @vclips
                         * it's 32-bit.
                         */
                        if (clips) {
                                dirty->fb_x = (s32) clips_ptr->x1;
                                dirty->fb_y = (s32) clips_ptr->y1;
                                dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
                                        crtc_x;
                                dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
                                        crtc_y;
                        } else {
                                dirty->fb_x = vclips_ptr->x;
                                dirty->fb_y = vclips_ptr->y;
                                dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
                                        dest_x - crtc_x;
                                dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
                                        dest_y - crtc_y;
                        }

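                        /*
                         * Translate the clip origin into unit (crtc) space:
                         * the framebuffer position, shifted by the
                         * destination offset, relative to the crtc origin.
                         */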
                        dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
                        dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

                        /* Skip this clip if it's outside the crtc region */
                        if (dirty->unit_x1 >= crtc_width ||
                            dirty->unit_y1 >= crtc_height ||
                            dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
                                continue;

                        /* Clip right and bottom to crtc limits */
                        dirty->unit_x2 = min_t(s32, dirty->unit_x2,
                                               crtc_width);
                        dirty->unit_y2 = min_t(s32, dirty->unit_y2,
                                               crtc_height);

                        /* Clip left and top to crtc limits */
                        clip_left = min_t(s32, dirty->unit_x1, 0);
                        clip_top = min_t(s32, dirty->unit_y1, 0);
                        dirty->unit_x1 -= clip_left;
                        dirty->unit_y1 -= clip_top;
                        dirty->fb_x -= clip_left;
                        dirty->fb_y -= clip_top;

                        dirty->clip(dirty);
                }

                dirty->fifo_commit(dirty);
        }

        return 0;
}

/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
                                      struct drm_file *file_priv,
                                      struct vmw_validation_context *ctx,
                                      struct vmw_fence_obj **out_fence,
                                      struct drm_vmw_fence_rep __user *
                                      user_fence_rep)
{
        struct vmw_fence_obj *fence = NULL;
        uint32_t handle = 0;
        int ret = 0;

        if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
            out_fence)
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
                                                 file_priv ? &handle : NULL);
        vmw_validation_done(ctx, fence);
        if (file_priv)
                vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                            ret, user_fence_rep, fence,
                                            handle, -1);
        if (out_fence)
                *out_fence = fence;
        else
                vmw_fence_obj_unreference(&fence);
}

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
                         const struct drm_clip_rect *clips,
                         unsigned num_clips,
                         int increment)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBImage body;
        } *cmd;
        SVGA3dBox *box;
        size_t copy_size = 0;
        int i;

        if (!clips)
                return 0;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
        if (!cmd)
                return -ENOMEM;

        for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
                box = &cmd->body.box;

                cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.image.sid = res->id;
                cmd->body.image.face = 0;
                cmd->body.image.mipmap = 0;

                if (clips->x1 > size->width || clips->x2 > size->width ||
                    clips->y1 > size->height || clips->y2 > size->height) {
                        DRM_ERROR("Invalid clips outside of framebuffer.\n");
                        return -EINVAL;
                }

                box->x = clips->x1;
                box->y = clips->y1;
                box->z = 0;
                box->w = clips->x2 - clips->x1;
                box->h = clips->y2 - clips->y1;
                box->d = 1;

                copy_size += sizeof(*cmd);
        }

        vmw_cmd_commit(dev_priv, copy_size);

        return 0;
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
        if (dev_priv->implicit_placement_property)
                return;

        dev_priv->implicit_placement_property =
                drm_property_create_range(&dev_priv->drm,
                                          DRM_MODE_PROP_IMMUTABLE,
                                          "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
        if (IS_ERR(dev_priv->suspend_state)) {
                int ret = PTR_ERR(dev_priv->suspend_state);

                DRM_ERROR("Failed kms suspend: %d\n", ret);
                dev_priv->suspend_state = NULL;

                return ret;
        }

        return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        if (WARN_ON(!dev_priv->suspend_state))
                return 0;

        ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
        dev_priv->suspend_state = NULL;

        return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
        drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
        struct drm_plane_state *state = update->plane->state;
        struct drm_plane_state *old_state = update->old_state;
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;
        struct drm_rect bb;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        uint32_t reserved_size = 0;
        uint32_t submit_size = 0;
        uint32_t curr_size = 0;
        uint32_t num_hits = 0;
        void *cmd_start;
        char *cmd_next;
        int ret;

        /*
         * Iterate in advance to check whether a plane update is really
         * needed, and to find the number of clips that actually lie within
         * the plane src, for fifo allocation.
         */
        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip)
                num_hits++;

        if (num_hits == 0)
                return 0;

        if (update->vfb->bo) {
                struct vmw_framebuffer_bo *vfbbo =
                        container_of(update->vfb, typeof(*vfbbo), base);

                /*
                 * For screen targets we want a mappable bo, for everything
                 * else we want an accelerated, i.e. host-backed (VRAM or
                 * GMR), bo. If the display unit is not a screen target then
                 * MOBs shouldn't be available.
                 */
                if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
                        vmw_bo_placement_set(vfbbo->buffer,
                                             VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
                                             VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
                } else {
                        WARN_ON(update->dev_priv->has_mob);
                        vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
                }
                ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
        } else {
                struct vmw_framebuffer_surface *vfbs =
                        container_of(update->vfb, typeof(*vfbs), base);

                ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
                                                  0, VMW_RES_DIRTY_NONE, NULL,
                                                  NULL);
        }

        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
        if (ret)
                goto out_unref;

        reserved_size = update->calc_fifo_size(update, num_hits);
        cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
        if (!cmd_start) {
                ret = -ENOMEM;
                goto out_revert;
        }

        cmd_next = cmd_start;

        if (update->post_prepare) {
                curr_size = update->post_prepare(update, cmd_next);
                cmd_next += curr_size;
                submit_size += curr_size;
        }

        if (update->pre_clip) {
                curr_size = update->pre_clip(update, cmd_next, num_hits);
                cmd_next += curr_size;
                submit_size += curr_size;
        }

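        /*
         * Start with an inverted bounding box; the loop below grows it to the
         * union of all damage clips.
         */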
        bb.x1 = INT_MAX;
        bb.y1 = INT_MAX;
        bb.x2 = INT_MIN;
        bb.y2 = INT_MIN;

        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip) {
                uint32_t fb_x = clip.x1;
                uint32_t fb_y = clip.y1;

                vmw_du_translate_to_crtc(state, &clip);
                if (update->clip) {
                        curr_size = update->clip(update, cmd_next, &clip, fb_x,
                                                 fb_y);
                        cmd_next += curr_size;
                        submit_size += curr_size;
                }
                bb.x1 = min_t(int, bb.x1, clip.x1);
                bb.y1 = min_t(int, bb.y1, clip.y1);
                bb.x2 = max_t(int, bb.x2, clip.x2);
                bb.y2 = max_t(int, bb.y2, clip.y2);
        }

        curr_size = update->post_clip(update, cmd_next, &bb);
        submit_size += curr_size;

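        /*
         * If the callbacks wrote more than was reserved, something went
         * wrong; commit zero bytes rather than submitting past the
         * reservation.
         */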
        if (reserved_size < submit_size)
                submit_size = 0;

        vmw_cmd_commit(update->dev_priv, submit_size);

        vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
                                         update->out_fence, NULL);
        return ret;

out_revert:
        vmw_validation_revert(&val_ctx);

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}

/**
 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
 *
 * @connector: the drm connector, part of a DU container
 * @mode: drm mode to check
 *
 * Returns MODE_OK on success, or a drm_mode_status error code.
 */
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
                                              struct drm_display_mode *mode)
{
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        u32 max_width = dev_priv->texture_max_width;
        u32 max_height = dev_priv->texture_max_height;
        u32 assumed_cpp = 4;

        if (dev_priv->assume_16bpp)
                assumed_cpp = 2;

        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width = min(dev_priv->stdu_max_width, max_width);
                max_height = min(dev_priv->stdu_max_height, max_height);
        }

        if (max_width < mode->hdisplay)
                return MODE_BAD_HVALUE;

        if (max_height < mode->vdisplay)
                return MODE_BAD_VVALUE;

        if (!vmw_kms_validate_mode_vram(dev_priv,
                                        mode->hdisplay * assumed_cpp,
                                        mode->vdisplay))
                return MODE_MEM;

        return MODE_OK;
}

/**
 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
 *
 * @connector: the drm connector, part of a DU container
 *
 * Returns the number of added modes.
 */
int vmw_connector_get_modes(struct drm_connector *connector)
{
        struct vmw_display_unit *du = vmw_connector_to_du(connector);
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode prefmode = { DRM_MODE("preferred",
                DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        u32 max_width;
        u32 max_height;
        u32 num_modes;

        /* Add preferred mode */
        mode = drm_mode_duplicate(dev, &prefmode);
        if (!mode)
                return 0;

        mode->hdisplay = du->pref_width;
        mode->vdisplay = du->pref_height;
        vmw_guess_mode_timing(mode);
        drm_mode_set_name(mode);

        drm_mode_probed_add(connector, mode);
        drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));

        /* Probe connector for all modes not exceeding our geom limits */
        max_width = dev_priv->texture_max_width;
        max_height = dev_priv->texture_max_height;

        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width = min(dev_priv->stdu_max_width, max_width);
                max_height = min(dev_priv->stdu_max_height, max_height);
        }

        num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);

        return num_modes;
}