/**************************************************************************
 *
 * Copyright © 2009-2022 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29
#define DRM_VMW_MKSSTAT_RESET 30
#define DRM_VMW_MKSSTAT_ADD 31
#define DRM_VMW_MKSSTAT_REMOVE 32

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * SM4_1 support is enabled.
 *
 * DRM_VMW_PARAM_SM5:
 * SM5 support is enabled.
 *
 * DRM_VMW_PARAM_GL43:
 * SM5.1+GL4.3 support is enabled.
 *
 * DRM_VMW_PARAM_DEVICE_ID:
 * PCI ID of the underlying SVGA device.
 */

#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15
#define DRM_VMW_PARAM_GL43 16
#define DRM_VMW_PARAM_DEVICE_ID 17

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
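
/*
 * A minimal user-space sketch of querying a parameter. It assumes libdrm's
 * drmCommandWriteRead() helper from <xf86drm.h> and an already-opened vmwgfx
 * device fd; error handling is elided:
 *
 *	#include <xf86drm.h>
 *
 *	static int vmw_has_3d(int fd)
 *	{
 *		struct drm_vmw_getparam_arg arg = {
 *			.param = DRM_VMW_PARAM_3D,
 *		};
 *
 *		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *					&arg, sizeof(arg)) != 0)
 *			return 0;	// query failed, assume no 3D
 *		return arg.value != 0;	// value is filled in by the kernel
 *	}
 */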

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
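
/*
 * A hedged sketch of creating a single-face, single-mip legacy surface with
 * this union. The format code is illustrative (SVGA3D format values come
 * from the device headers, not this file); drmCommandWriteRead() from libdrm
 * is an assumption and error handling is elided:
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg;
 *	__s32 sid;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = 2;		// illustrative SVGA3D format code
 *	arg.req.mip_levels[0] = 1;	// one mip on face 0; unused faces stay 0
 *	arg.req.size_addr = (__u64)(uintptr_t)&size;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				&arg, sizeof(arg)) == 0)
 *		sid = arg.rep.sid;	// usable directly in the command stream
 */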

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that, when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @context_handle: Handle of the context to submit the commands to;
 * used with execbuf version 2 and later.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the @seqno member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the @seqno member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
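
/*
 * A hedged sketch of a version 2 command submission that asks for a fence
 * back. It assumes libdrm's drmCommandWrite(), a @cmds buffer already filled
 * with device commands, and that ~0u matches the device headers'
 * SVGA3D_INVALID_ID (no DX context); error handling is elided:
 *
 *	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(uintptr_t)cmds,
 *		.command_size = cmds_size,
 *		.fence_rep = (__u64)(uintptr_t)&fence_rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = ~0u,	// assumed SVGA3D_INVALID_ID
 *	};
 *
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence_rep.error == 0)
 *		fence_handle = fence_rep.handle;	// wait on it later
 */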

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
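
/*
 * A hedged sketch of allocating a buffer object and mapping it through the
 * returned @map_handle, as described above. drmCommandWriteRead() from
 * libdrm is an assumption and error handling is elided:
 *
 *	#include <sys/mman.h>
 *
 *	union drm_vmw_alloc_bo_arg arg = { .req = { .size = 65536 } };
 *	void *ptr;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO,
 *				&arg, sizeof(arg)) == 0) {
 *		ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.rep.map_handle);
 *		// arg.rep.handle / cur_gmr_id identify the buffer in commands
 *	}
 */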

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @flags: Overlay flags as understood by the host.
 * @color_key: Color key for the overlay.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
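
/*
 * A hedged sketch of reading the capability block. The required buffer size
 * is queried first via DRM_VMW_PARAM_3D_CAPS_SIZE; the libdrm helpers are
 * assumptions and error handling is elided:
 *
 *	struct drm_vmw_getparam_arg gp = {
 *		.param = DRM_VMW_PARAM_3D_CAPS_SIZE,
 *	};
 *	struct drm_vmw_get_3d_cap_arg cap = { 0 };
 *	void *caps;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *	caps = calloc(1, gp.value);
 *	cap.buffer = (__u64)(uintptr_t)caps;
 *	cap.max_size = gp.value;
 *	drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */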

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
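
/*
 * A hedged sketch of waiting on the fence returned by DRM_VMW_EXECBUF and
 * letting the kernel drop the reference in the same call via
 * DRM_VMW_WAIT_OPTION_UNREF. drmCommandWriteRead() from libdrm is an
 * assumption and error handling is elided:
 *
 *	struct drm_vmw_fence_wait_arg wait = {
 *		.handle = fence_rep.handle,	// from struct drm_vmw_fence_rep
 *		.timeout_us = 0,		// wait indefinitely
 *		.lazy = 1,			// timing not critical
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *		.wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait, sizeof(wait));
 */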

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Whether the fence has signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
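
/*
 * A hedged sketch of programming a two-output side-by-side layout. The
 * geometry values are illustrative; drmCommandWrite() from libdrm is an
 * assumption and error handling is elided:
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *		{ .x = 1920, .y = 0, .w = 1920, .h = 1080 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(uintptr_t)rects,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */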


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreference a shader
 *
 * Drops a user-space reference to a shader, destroying the shader when
 * the last reference goes away.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 * @base_size: Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
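
/*
 * A hedged sketch of creating a guest-backed surface together with its
 * backup buffer. The svga3d_flags and format codes come from the device
 * headers and are illustrative here, as is the assumption that ~0u equals
 * SVGA3D_INVALID_ID; drmCommandWriteRead() from libdrm is an assumption
 * and error handling is elided:
 *
 *	union drm_vmw_gb_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = 2;		// illustrative SVGA3d format code
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = ~0u;	// assumed SVGA3D_INVALID_ID, no buffer yet
 *	arg.req.base_size = (struct drm_vmw_size){ 256, 256, 1 };
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		surface_handle = arg.rep.handle;
 */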

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
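
/*
 * A hedged sketch of bracketing a CPU write to a buffer object with a
 * grab/release pair. drmCommandWrite() from libdrm is an assumption and
 * error handling is elided:
 *
 *	struct drm_vmw_synccpu_arg sync = {
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_write,
 *		.handle = bo_handle,	// from struct drm_vmw_bo_rep
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 *	// ... write through the mmap()ed pointer ...
 *	sync.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 */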

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga3d flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1,
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 * @receive_len: Size in bytes of the receive buffer.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};
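
/*
 * A hedged sketch of sending a guest-to-host message without waiting for a
 * reply. The message text and drmCommandWriteRead() from libdrm are
 * assumptions; error handling is elided:
 *
 *	const char *msg = "log hello from the guest";
 *	struct drm_vmw_msg_arg arg = {
 *		.send = (__u64)(uintptr_t)msg,
 *		.send_only = 1,		// no receive buffer supplied
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */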

/**
 * struct drm_vmw_mksstat_add_arg
 *
 * @stat: Pointer to user-space stat-counters array, page-aligned.
 * @info: Pointer to user-space counter-infos array, page-aligned.
 * @strs: Pointer to user-space stat strings, page-aligned.
 * @stat_len: Length in bytes of stat-counters array.
 * @info_len: Length in bytes of counter-infos array.
 * @strs_len: Length in bytes of the stat strings, terminators included.
 * @description: Pointer to instance descriptor string; will be truncated
 * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
 * @id: Output identifier of the produced record; -1 if error.
 *
 * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
 */
struct drm_vmw_mksstat_add_arg {
	__u64 stat;
	__u64 info;
	__u64 strs;
	__u64 stat_len;
	__u64 info_len;
	__u64 strs_len;
	__u64 description;
	__u64 id;
};

/**
 * struct drm_vmw_mksstat_remove_arg
 *
 * @id: Identifier of the record being disposed, originally obtained through
 * DRM_VMW_MKSSTAT_ADD ioctl.
 *
 * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
 */
struct drm_vmw_mksstat_remove_arg {
	__u64 id;
};

#if defined(__cplusplus)
}
#endif

#endif
