/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

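/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object being freed
 *
 * Unregisters any MMU notifier attached to the buffer and drops the
 * driver's reference to the underlying radeon_bo.
 */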
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

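/**
 * radeon_gem_object_create - allocate a buffer object wrapped in a GEM object
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to return the new GEM object
 *
 * Allocations larger than the unpinned GTT size are rejected, and a failed
 * VRAM allocation is retried with GTT added as an allowed domain.
 */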
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

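/**
 * radeon_gem_set_domain - validate a GEM object for a read/write domain
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domains. For CPU access
 * the object is simply waited on until it is idle; migrating a dma-buf
 * shared object to VRAM is rejected.
 */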
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to be idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

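/**
 * radeon_gem_object_close - drop the per-file VM mapping of a GEM object
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the bo_va reference taken by radeon_gem_object_open() and removes
 * the mapping from the client's VM once the last reference is gone.
 */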
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

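/**
 * radeon_gem_handle_lockup - translate a GPU lockup into a reset and retry
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by the failing operation
 *
 * On -EDEADLK the GPU is reset and -EAGAIN is returned so userspace retries
 * the ioctl; any other error code is passed through unchanged.
 */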
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
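/**
 * radeon_gem_info_ioctl - report VRAM and GART sizes to userspace
 *
 * Returns the total VRAM size, the CPU-visible VRAM minus the currently
 * pinned VRAM, and the GART size minus the currently pinned GART
 * allocations.
 */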
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

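/**
 * radeon_gem_create_ioctl - allocate a new GEM object for userspace
 *
 * Rounds the requested size up to a full page, creates the buffer object in
 * the requested domain and returns a handle for it. Errors are funnelled
 * through radeon_gem_handle_lockup() so a GPU lockup turns into -EAGAIN.
 */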
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

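/**
 * radeon_gem_userptr_ioctl - wrap a userspace address range in a GEM object
 *
 * The address and size must be page aligned and only the documented
 * RADEON_GEM_USERPTR_* flags are accepted. Writable mappings require both
 * the ANONONLY and REGISTER flags so that an MMU notifier is installed.
 * With the VALIDATE flag the buffer is immediately validated into the GTT
 * domain.
 */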
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

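/**
 * radeon_gem_busy_ioctl - non-blocking check whether a GEM object is idle
 *
 * Returns -EBUSY if any fence on the object's reservation is still pending,
 * 0 otherwise, and reports the object's current placement domain back to
 * userspace in both cases.
 */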
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

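/**
 * radeon_gem_wait_idle_ioctl - block until a GEM object is idle
 *
 * Waits up to 30 seconds for all fences on the object, returning -EBUSY on
 * timeout. If the buffer currently lives in VRAM, the HDP cache is flushed
 * via MMIO afterwards.
 */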
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

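/**
 * radeon_gem_va_ioctl - map or unmap a GEM object in the client's GPU VM
 *
 * Validates the requested operation, offset and flags, looks up the bo_va
 * for the object in the caller's VM and then maps or unmaps it at the given
 * virtual address, flushing the change through radeon_gem_va_update_vm().
 */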
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

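/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 *
 * Computes a hardware-aligned pitch and a page-aligned size from the
 * requested width, height and bpp, allocates the buffer in VRAM and
 * returns a handle for it.
 */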
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}