/*
 * Copyright 2005 Stephane Marchesin.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__

#define DRM_NOUVEAU_EVENT_NVIF 0x80000000

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16

/*
 * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
 *
 * Query the maximum number of IBs that can be pushed through a single
 * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
 * ioctl().
 */
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17

struct drm_nouveau_getparam {
	__u64 param;
	__u64 value;
};
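
/*
 * Example (illustrative sketch, not part of the uAPI): a userspace driver
 * could query NOUVEAU_GETPARAM_EXEC_PUSH_MAX roughly as follows. The open
 * device fd is an assumption and error handling is omitted; a real driver
 * would typically go through libdrm's drmIoctl(), which restarts on EINTR.
 *
 *	struct drm_nouveau_getparam gp = {
 *		.param = NOUVEAU_GETPARAM_EXEC_PUSH_MAX,
 *	};
 *	__u64 exec_push_max = 0;
 *
 *	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp) == 0)
 *		exec_push_max = gp.value;
 */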

struct drm_nouveau_channel_alloc {
	__u32 fb_ctxdma_handle;
	__u32 tt_ctxdma_handle;

	__s32 channel;
	__u32 pushbuf_domains;

	/* Notifier memory */
	__u32 notifier_handle;

	/* DRM-enforced subchannel assignments */
	struct {
		__u32 handle;
		__u32 grclass;
	} subchan[8];
	__u32 nr_subchan;
};

struct drm_nouveau_channel_free {
	__s32 channel;
};

#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
/* The BO will never be shared via import or export. */
#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)

#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP 0x00000001
#define NOUVEAU_GEM_TILE_32BPP 0x00000002
#define NOUVEAU_GEM_TILE_ZETA 0x00000004
#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008

struct drm_nouveau_gem_info {
	__u32 handle;
	__u32 domain;
	__u64 size;
	__u64 offset;
	__u64 map_handle;
	__u32 tile_mode;
	__u32 tile_flags;
};

struct drm_nouveau_gem_new {
	struct drm_nouveau_gem_info info;
	__u32 channel_hint;
	__u32 align;
};
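
/*
 * Example (illustrative sketch, not part of the uAPI): allocating a 1 MiB,
 * CPU-mappable VRAM buffer object with DRM_IOCTL_NOUVEAU_GEM_NEW. The open
 * device fd is an assumption and error handling is omitted. On success,
 * req.info.handle names the new GEM object and req.info.map_handle can be
 * used as the mmap() offset on the same fd.
 *
 *	struct drm_nouveau_gem_new req = {
 *		.info = {
 *			.size = 1 << 20,
 *			.domain = NOUVEAU_GEM_DOMAIN_VRAM |
 *				  NOUVEAU_GEM_DOMAIN_MAPPABLE,
 *		},
 *		.align = 0x1000,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req);
 */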

#define NOUVEAU_GEM_MAX_BUFFERS 1024
struct drm_nouveau_gem_pushbuf_bo_presumed {
	__u32 valid;
	__u32 domain;
	__u64 offset;
};

struct drm_nouveau_gem_pushbuf_bo {
	__u64 user_priv;
	__u32 handle;
	__u32 read_domains;
	__u32 write_domains;
	__u32 valid_domains;
	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
};

#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
#define NOUVEAU_GEM_RELOC_OR (1 << 2)
#define NOUVEAU_GEM_MAX_RELOCS 1024
struct drm_nouveau_gem_pushbuf_reloc {
	__u32 reloc_bo_index;
	__u32 reloc_bo_offset;
	__u32 bo_index;
	__u32 flags;
	__u32 data;
	__u32 vor;
	__u32 tor;
};

#define NOUVEAU_GEM_MAX_PUSH 512
struct drm_nouveau_gem_pushbuf_push {
	__u32 bo_index;
	__u32 pad;
	__u64 offset;
	__u64 length;
#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};

struct drm_nouveau_gem_pushbuf {
	__u32 channel;
	__u32 nr_buffers;
	__u64 buffers;
	__u32 nr_relocs;
	__u32 nr_push;
	__u64 relocs;
	__u64 push;
	__u32 suffix0;
	__u32 suffix1;
#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
	__u64 vram_available;
	__u64 gart_available;
};
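
/*
 * Example (illustrative sketch, not part of the uAPI): submitting a single
 * push buffer through the legacy DRM_IOCTL_NOUVEAU_GEM_PUSHBUF path. The
 * names fd, chan (from channel_alloc), pb_handle (a GEM object holding the
 * commands) and cmd_bytes are assumptions; relocations and error handling
 * are omitted. A push entry's bo_index selects an entry of the buffers
 * array, and offset/length are in bytes within that BO.
 *
 *	struct drm_nouveau_gem_pushbuf_bo bo = {
 *		.handle = pb_handle,
 *		.valid_domains = NOUVEAU_GEM_DOMAIN_GART,
 *		.read_domains = NOUVEAU_GEM_DOMAIN_GART,
 *	};
 *	struct drm_nouveau_gem_pushbuf_push push = {
 *		.bo_index = 0,
 *		.offset = 0,
 *		.length = cmd_bytes,
 *	};
 *	struct drm_nouveau_gem_pushbuf req = {
 *		.channel = chan,
 *		.nr_buffers = 1,
 *		.buffers = (__u64)(uintptr_t)&bo,
 *		.nr_push = 1,
 *		.push = (__u64)(uintptr_t)&push,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_PUSHBUF, &req);
 */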

#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
struct drm_nouveau_gem_cpu_prep {
	__u32 handle;
	__u32 flags;
};

struct drm_nouveau_gem_cpu_fini {
	__u32 handle;
};
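
/*
 * Example (illustrative sketch, not part of the uAPI): preparing a BO for
 * CPU writes, mapping it, and finishing CPU access again. fd and info (as
 * returned by DRM_IOCTL_NOUVEAU_GEM_NEW or DRM_IOCTL_NOUVEAU_GEM_INFO) are
 * assumptions; error handling is omitted.
 *
 *	struct drm_nouveau_gem_cpu_prep prep = {
 *		.handle = info.handle,
 *		.flags = NOUVEAU_GEM_CPU_PREP_WRITE,
 *	};
 *	struct drm_nouveau_gem_cpu_fini fini = { .handle = info.handle };
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep);
 *	void *ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, info.map_handle);
 *	memset(ptr, 0, info.size);
 *	munmap(ptr, info.size);
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);
 */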

/**
 * struct drm_nouveau_sync - sync object
 *
 * This structure serves as a synchronization mechanism for (potentially)
 * asynchronous operations such as EXEC or VM_BIND.
 */
struct drm_nouveau_sync {
	/**
	 * @flags: the flags for a sync object
	 *
	 * The first 8 bits are used to determine the type of the sync object.
	 */
	__u32 flags;
#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
	/**
	 * @handle: the handle of the sync object
	 */
	__u32 handle;
	/**
	 * @timeline_value:
	 *
	 * The timeline point of the sync object in case the syncobj is of
	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};
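
/*
 * Example (illustrative sketch, not part of the uAPI): a signal entry on a
 * timeline syncobj, as it could be passed through the sig_ptr array of the
 * EXEC or VM_BIND ioctls. The syncobj handle and the timeline point value
 * are assumptions.
 *
 *	struct drm_nouveau_sync sig = {
 *		.flags = DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ,
 *		.handle = syncobj_handle,
 *		.timeline_value = 1,
 *	};
 */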

/**
 * struct drm_nouveau_vm_init - GPU VA space init structure
 *
 * Used to initialize the GPU's VA space for a user client, telling the kernel
 * which portion of the VA space is managed by the UMD and kernel respectively.
 *
 * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
 * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
 * with -ENOSYS.
 */
struct drm_nouveau_vm_init {
	/**
	 * @kernel_managed_addr: start address of the kernel managed VA space
	 * region
	 */
	__u64 kernel_managed_addr;
	/**
	 * @kernel_managed_size: size of the kernel managed VA space region in
	 * bytes
	 */
	__u64 kernel_managed_size;
};
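
/*
 * Example (illustrative sketch, not part of the uAPI): reserving a kernel
 * managed VA region before any BO or channel is created, so the VM_BIND and
 * EXEC interfaces can be used afterwards. The start address and size below
 * are arbitrary assumptions; a real UMD picks them to fit its own VA layout
 * and must not place its own mappings inside the kernel managed region.
 *
 *	struct drm_nouveau_vm_init init = {
 *		.kernel_managed_addr = 0,
 *		.kernel_managed_size = 1ull << 30,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);
 */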

/**
 * struct drm_nouveau_vm_bind_op - VM_BIND operation
 *
 * This structure represents a single VM_BIND operation. UMDs should pass
 * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
 */
struct drm_nouveau_vm_bind_op {
	/**
	 * @op: the operation type
	 */
	__u32 op;
/**
 * @DRM_NOUVEAU_VM_BIND_OP_MAP:
 *
 * Map a GEM object to the GPU's VA space. Optionally, the
 * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
 * create sparse mappings for the given range.
 */
#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
/**
 * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
 *
 * Unmap an existing mapping in the GPU's VA space. If the region the mapping
 * is located in is a sparse region, new sparse mappings are created where the
 * unmapped (memory backed) mapping was mapped previously. To remove a sparse
 * region the &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
 */
#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
	/**
	 * @flags: the flags for a &drm_nouveau_vm_bind_op
	 */
	__u32 flags;
/**
 * @DRM_NOUVEAU_VM_BIND_SPARSE:
 *
 * Indicates that an allocated VA space region should be sparse.
 */
#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
	/**
	 * @handle: the handle of the DRM GEM object to map
	 */
	__u32 handle;
	/**
	 * @pad: 32 bit padding, should be 0
	 */
	__u32 pad;
	/**
	 * @addr:
	 *
	 * the address the VA space region or (memory backed) mapping should be mapped to
	 */
	__u64 addr;
	/**
	 * @bo_offset: the offset within the BO backing the mapping
	 */
	__u64 bo_offset;
	/**
	 * @range: the size of the requested mapping in bytes
	 */
	__u64 range;
};

/**
 * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
 */
struct drm_nouveau_vm_bind {
	/**
	 * @op_count: the number of &drm_nouveau_vm_bind_op
	 */
	__u32 op_count;
	/**
	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
	 */
	__u32 flags;
/**
 * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
 *
 * Indicates that the given VM_BIND operation should be executed asynchronously
 * by the kernel.
 *
 * If this flag is not supplied the kernel executes the associated operations
 * synchronously and doesn't accept any &drm_nouveau_sync objects.
 */
#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
	/**
	 * @wait_count: the number of wait &drm_nouveau_syncs
	 */
	__u32 wait_count;
	/**
	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
	 */
	__u32 sig_count;
	/**
	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
	 */
	__u64 wait_ptr;
	/**
	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
	 */
	__u64 sig_ptr;
	/**
	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
	 */
	__u64 op_ptr;
};
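
/*
 * Example (illustrative sketch, not part of the uAPI): synchronously mapping
 * a whole BO at a UMD-chosen virtual address. fd, bo_handle, gpu_va and
 * bo_size are assumptions. Since DRM_NOUVEAU_VM_BIND_RUN_ASYNC is not set,
 * no &drm_nouveau_sync objects are passed.
 *
 *	struct drm_nouveau_vm_bind_op op = {
 *		.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
 *		.handle = bo_handle,
 *		.addr = gpu_va,
 *		.bo_offset = 0,
 *		.range = bo_size,
 *	};
 *	struct drm_nouveau_vm_bind bind = {
 *		.op_count = 1,
 *		.op_ptr = (__u64)(uintptr_t)&op,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
 */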

/**
 * struct drm_nouveau_exec_push - EXEC push operation
 *
 * This structure represents a single EXEC push operation. UMDs should pass an
 * array of this structure via struct drm_nouveau_exec's &push_ptr field.
 */
struct drm_nouveau_exec_push {
	/**
	 * @va: the virtual address of the push buffer mapping
	 */
	__u64 va;
	/**
	 * @va_len: the length of the push buffer mapping
	 */
	__u32 va_len;
	/**
	 * @flags: the flags for this push buffer mapping
	 */
	__u32 flags;
#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};

/**
 * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
 */
struct drm_nouveau_exec {
	/**
	 * @channel: the channel to execute the push buffer in
	 */
	__u32 channel;
	/**
	 * @push_count: the number of &drm_nouveau_exec_push ops
	 */
	__u32 push_count;
	/**
	 * @wait_count: the number of wait &drm_nouveau_syncs
	 */
	__u32 wait_count;
	/**
	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
	 */
	__u32 sig_count;
	/**
	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
	 */
	__u64 wait_ptr;
	/**
	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
	 */
	__u64 sig_ptr;
	/**
	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
	 */
	__u64 push_ptr;
};
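
/*
 * Example (illustrative sketch, not part of the uAPI): submitting one push
 * buffer on a channel and signalling a binary syncobj on completion. fd,
 * chan, push_va, push_len and syncobj_handle are assumptions; the push
 * buffer is expected to already be mapped at push_va via VM_BIND.
 *
 *	struct drm_nouveau_exec_push push = {
 *		.va = push_va,
 *		.va_len = push_len,
 *	};
 *	struct drm_nouveau_sync sig = {
 *		.flags = DRM_NOUVEAU_SYNC_SYNCOBJ,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_nouveau_exec exec = {
 *		.channel = chan,
 *		.push_count = 1,
 *		.push_ptr = (__u64)(uintptr_t)&push,
 *		.sig_count = 1,
 *		.sig_ptr = (__u64)(uintptr_t)&sig,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
 */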

#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_VM_INIT 0x10
#define DRM_NOUVEAU_VM_BIND 0x11
#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44

struct drm_nouveau_svm_init {
	__u64 unmanaged_addr;
	__u64 unmanaged_size;
};

struct drm_nouveau_svm_bind {
	__u64 header;
	__u64 va_start;
	__u64 va_end;
	__u64 npages;
	__u64 stride;
	__u64 result;
	__u64 reserved0;
	__u64 reserved1;
};

#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
#define NOUVEAU_SVM_BIND_TARGET_BITS 32
#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff

/*
 * The mask below is used to validate the ioctl argument; userspace can also
 * use it to make sure that no bits are set beyond the fields known for a
 * given kernel version.
 */
#define NOUVEAU_SVM_BIND_VALID_BITS 48
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)


/*
 * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronously migrate to the target memory.
 * result: number of pages successfully migrated to the target memory.
 */
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0

/*
 * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
 */
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
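
/*
 * Example (illustrative sketch, not part of the uAPI): packing the header
 * field of &drm_nouveau_svm_bind to request a synchronous migration of an
 * SVM range to GPU VRAM. fd, start and end are assumptions; unused fields
 * are left zero.
 *
 *	struct drm_nouveau_svm_bind bind = {
 *		.header = ((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
 *			   << NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
 *			  ((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
 *			   << NOUVEAU_SVM_BIND_TARGET_SHIFT),
 *		.va_start = start,
 *		.va_end = end,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &bind);
 */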


#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)

#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)

#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)

#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif

#endif /* __NOUVEAU_DRM_H__ */

Source: linux/include/uapi/drm/nouveau_drm.h