/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 1

struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};
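
/* Usage sketch (illustrative, not part of this header): querying the
 * interface version from user space. Assumes <fcntl.h>, <stdio.h> and
 * <sys/ioctl.h> are included and omits error handling; /dev/kfd is the
 * standard KFD device node.
 *
 *	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	struct kfd_ioctl_get_version_args ver = {0};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &ver) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       ver.major_version, ver.minor_version);
 */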

/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE	0
#define KFD_IOC_QUEUE_TYPE_SDMA		1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL	2

#define KFD_MAX_QUEUE_PERCENTAGE	100
#define KFD_MAX_QUEUE_PRIORITY		15

struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;		/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;			/* to KFD */
	__u32 queue_type;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
	__u32 queue_id;			/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;		/* to KFD */
	__u64 ctx_save_restore_address;	/* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};
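
/* Usage sketch (illustrative): creating an AQL compute queue. ring_buf,
 * ring_bytes and gpu_id are placeholders that a real runtime obtains from
 * its own allocator and from the topology; the ring size is typically a
 * power of two. kfd_fd is the open /dev/kfd file descriptor.
 *
 *	struct kfd_ioctl_create_queue_args qa = {0};
 *
 *	qa.ring_base_address = (__u64)(uintptr_t)ring_buf;
 *	qa.ring_size = ring_bytes;
 *	qa.gpu_id = gpu_id;
 *	qa.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	qa.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *	qa.queue_priority = 7;	// mid-range, 0..KFD_MAX_QUEUE_PRIORITY
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &qa) == 0) {
 *		// qa.queue_id, qa.doorbell_offset and the read/write
 *		// pointer addresses are now filled in by KFD.
 *	}
 */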

struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;			/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;	/* to KFD */
	__u64 cu_mask_ptr;	/* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD */
	__u32 alternate_policy;		/* to KFD */
	__u32 pad;
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counters should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
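
/* Usage sketch (illustrative): bracketing a workload with two counter
 * reads for profiling. system_clock_freq is the counter frequency in Hz,
 * so elapsed time is delta_ticks / freq; the multiply-first form below
 * yields nanoseconds (and assumes the delta is small enough not to
 * overflow a __u64).
 *
 *	struct kfd_ioctl_get_clock_counters_args t0 = { .gpu_id = gpu_id };
 *	struct kfd_ioctl_get_clock_counters_args t1 = { .gpu_id = gpu_id };
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &t0);
 *	// ... work being profiled ...
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &t1);
 *
 *	__u64 ns = (t1.system_clock_counter - t0.system_clock_counter) *
 *		   1000000000ULL / t1.system_clock_freq;
 */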

struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;	/* from KFD */
	__u64 scratch_base;	/* from KFD */
	__u64 scratch_limit;	/* from KFD */
	__u64 gpuvm_base;	/* from KFD */
	__u64 gpuvm_limit;	/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];	/* from KFD */

	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
	__u32 num_of_nodes;
	__u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by the kernel.
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - indicates amount of memory present in
	 * kfd_process_device_apertures_ptr
	 * from KFD - number of entries filled by KFD.
	 */
	__u32 num_of_nodes;
	__u32 pad;
};
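
/* Usage sketch (illustrative): a common two-call pattern, assuming the
 * driver reports the available node count when num_of_nodes is 0 on
 * input. Error handling and <stdlib.h> omitted.
 *
 *	struct kfd_ioctl_get_process_apertures_new_args aa = {0};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &aa);
 *	struct kfd_process_device_apertures *ap =
 *		calloc(aa.num_of_nodes, sizeof(*ap));
 *	aa.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)ap;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &aa);
 *	// ap[0..aa.num_of_nodes-1] now hold per-GPU aperture ranges.
 */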

#define MAX_ALLOWED_NUM_POINTS		100
#define MAX_ALLOWED_AW_BUFF_SIZE	4096
#define MAX_ALLOWED_WAC_BUFF_SIZE	128

struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL			0
#define KFD_IOC_EVENT_NODECHANGE		1
#define KFD_IOC_EVENT_DEVICESTATECHANGE		2
#define KFD_IOC_EVENT_HW_EXCEPTION		3
#define KFD_IOC_EVENT_SYSTEM_EVENT		4
#define KFD_IOC_EVENT_DEBUG_EVENT		5
#define KFD_IOC_EVENT_PROFILE_EVENT		6
#define KFD_IOC_EVENT_QUEUE_EVENT		7
#define KFD_IOC_EVENT_MEMORY			8

#define KFD_IOC_WAIT_RESULT_COMPLETE		0
#define KFD_IOC_WAIT_RESULT_TIMEOUT		1
#define KFD_IOC_WAIT_RESULT_FAIL		2

#define KFD_SIGNAL_EVENT_LIMIT			4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG	0
#define KFD_HW_EXCEPTION_ECC		1

struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;			/* to KFD - only valid for certain
					   event types */
	__u32 event_id;			/* from KFD */
	__u32 event_slot_index;		/* from KFD */
};

struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;		/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;
	__u32 gpu_id;
	__u32 pad;
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;
	__u32 reset_cause;
	__u32 memory_lost;
	__u32 gpu_id;
};

/* Event data */
struct kfd_event_data {
	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};				/* From KFD */
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					   for future exception types */
	__u32 event_id;			/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;	/* pointer to struct
				   kfd_event_data array, to KFD */
	__u32 num_events;	/* to KFD */
	__u32 wait_for_all;	/* to KFD */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;	/* from KFD */
};
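
/* Usage sketch (illustrative): creating a signal event and blocking on
 * it. The timeout is assumed to be in milliseconds; wait_result reports
 * one of the KFD_IOC_WAIT_RESULT_* values above.
 *
 *	struct kfd_ioctl_create_event_args ce = {0};
 *	struct kfd_event_data ed = {0};
 *	struct kfd_ioctl_wait_events_args wa = {0};
 *
 *	ce.event_type = KFD_IOC_EVENT_SIGNAL;
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ce);
 *
 *	ed.event_id = ce.event_id;
 *	wa.events_ptr = (__u64)(uintptr_t)&ed;
 *	wa.num_events = 1;
 *	wa.wait_for_all = 1;
 *	wa.timeout = 1000;
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wa);
 */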

struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;	/* from KFD */
	__u32 num_ranks;	/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};

struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;	/* to KFD */
	__u64 tma_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated;
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node;
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;
};
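
/* Usage sketch (illustrative): allocating 2 MiB of VRAM at a chosen
 * virtual address. It is assumed that va lies inside the gpuvm aperture
 * reported by the aperture ioctls and that the VM was acquired with
 * AMDKFD_IOC_ACQUIRE_VM beforehand.
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args ma = {0};
 *
 *	ma.va_addr = va;
 *	ma.size = 2 * 1024 * 1024;
 *	ma.gpu_id = gpu_id;
 *	ma.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *		   KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *	ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &ma);
 *	// ma.handle identifies the allocation from now on; ma.mmap_offset
 *	// can be used to CPU-map it through the DRM render node.
 */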

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;	/* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle:               memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices:            number of devices in the array
 * @n_success:            number of devices mapped successfully
 *
 * @n_success tells the caller how many devices from the start of the
 * array have mapped the buffer successfully. It can be passed into a
 * subsequent retry call to skip those devices. For the first call the
 * caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
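
/* Usage sketch (illustrative): mapping one allocation to several GPUs,
 * using the n_success retry semantics described above. gpu_ids is a
 * user-provided __u32 array of n_gpus device identifiers; ma is the
 * alloc args from the previous sketch.
 *
 *	struct kfd_ioctl_map_memory_to_gpu_args mm = {0};
 *
 *	mm.handle = ma.handle;
 *	mm.device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids;
 *	mm.n_devices = n_gpus;
 *	mm.n_success = 0;	// must be 0 on the first call
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &mm)) {
 *		// The first mm.n_success devices are already mapped;
 *		// retrying with the same args skips them.
 *		ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &mm);
 *	}
 */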

/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;		/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};
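
/* Usage sketch (illustrative): importing a dma-buf, e.g. one exported
 * from amdgpu via DRM PRIME. How dmabuf_fd is obtained is outside this
 * interface; va is assumed to lie inside the gpuvm aperture.
 *
 *	struct kfd_ioctl_import_dmabuf_args ia = {0};
 *
 *	ia.va_addr = va;
 *	ia.gpu_id = gpu_id;
 *	ia.dmabuf_fd = dmabuf_fd;
 *	ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &ia);
 *	// ia.handle can then be mapped with AMDKFD_IOC_MAP_MEMORY_TO_GPU.
 */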

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION			\
		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE			\
		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE		\
		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY		\
		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES	\
		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE			\
		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT			\
		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT		\
		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT			\
		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT			\
		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS			\
		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER			\
		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER		\
		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH		\
		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL		\
		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG		\
		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER		\
		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
		AMDKFD_IOWR(0x14, struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM			\
		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK			\
		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO		\
		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF		\
		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_COMMAND_START		0x01
#define AMDKFD_COMMAND_END		0x1E

#endif