// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

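/*
 * Per-stream state: the currently displayed buffer, whether the stream
 * has been claimed by userspace, whether it is paused, and the last
 * control arguments saved so that a paused stream can be resumed.
 */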
struct vmw_stream {
	struct vmw_bo *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/*
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

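/*
 * Layouts of the escape commands sent through the FIFO: a generic
 * escape header, and an escape carrying a video flush for one stream.
 */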
struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

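/*
 * Fill in a generic escape header for a VMware-namespace escape
 * command whose payload is @size bytes.
 */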
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

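/*
 * Fill in an escape command that flushes the video stream @stream_id.
 */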
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

/*
 * Send put command to hw.
 *
 * Returns
 * 0 on success, -ENOMEM if FIFO command space could not be reserved.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* The register defines are indices, so we need the highest index + 1 items */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* the size is header + number of items */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value = true;
	items[SVGA_VIDEO_FLAGS].value = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value = arg->format;
	items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	items[SVGA_VIDEO_SIZE].value = arg->size;
	items[SVGA_VIDEO_WIDTH].value = arg->width;
	items[SVGA_VIDEO_HEIGHT].value = arg->height;
	items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
	if (have_so) {
		items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_cmd_commit(dev_priv, fifo_size);

	return 0;
}

/*
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_cmd_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/*
 * Move a buffer to vram or a GMR if @pin is set, else unpin the buffer.
 *
 * With the introduction of screen objects, buffers can be placed in a
 * GMR instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   bool pin, bool inter)
{
	if (!pin)
		return vmw_bo_unpin(dev_priv, buf, inter);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_bo_pin_in_vram(dev_priv, buf, inter);

	return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/*
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but the buffer is left in vram. This allows, for instance, mode_set
 * to evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* If no buffer is attached, the stream is already completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_bo_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/*
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_bo *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and the stream is not paused, just
		 * send the put command; there is no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_bo_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/*
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/*
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

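/*
 * Overlays are available only if the overlay private data has been set
 * up and the FIFO advertises both the video and escape capabilities.
 */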
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
	return (dev_priv->overlay_priv != NULL &&
		((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) ==
		 VMW_OVERLAY_CAP_MASK));
}

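/*
 * Ioctl for controlling an overlay stream.
 *
 * Looks up the stream and, if @arg->enabled is false, stops it;
 * otherwise looks up the buffer object and updates the stream with
 * the new arguments.
 *
 * Takes the overlay lock.
 */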
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_bo *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_user_bo_unref(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

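/*
 * Returns the total number of overlay streams, or 0 if overlays are
 * not available.
 */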
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!vmw_overlay_available(dev_priv))
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

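/*
 * Returns the number of streams that have not yet been claimed.
 *
 * Takes the overlay lock.
 */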
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!vmw_overlay_available(dev_priv))
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

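/*
 * Claim a free stream and return its index in @out.
 *
 * Takes the overlay lock. Returns -ESRCH if every stream is already
 * claimed.
 */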
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

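/*
 * Release a claimed stream, stopping it first if it is still running.
 *
 * Takes the overlay lock.
 */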
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

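/*
 * Allocate and initialize the overlay private data, with all streams
 * initially unclaimed.
 */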
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

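/*
 * Tear down the overlay private data, stopping any streams that were
 * left running and warning if a buffer was never released.
 */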
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}