/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
30 | |
#include <linux/sort.h>
#include <linux/uaccess.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
39 | |
40 | static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu) |
41 | { |
42 | struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list, |
43 | rhead); |
44 | mutex_destroy(lock: &list->bo_list_mutex); |
45 | kvfree(addr: list); |
46 | } |
47 | |
48 | static void amdgpu_bo_list_free(struct kref *ref) |
49 | { |
50 | struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list, |
51 | refcount); |
52 | struct amdgpu_bo_list_entry *e; |
53 | |
54 | amdgpu_bo_list_for_each_entry(e, list) |
55 | amdgpu_bo_unref(bo: &e->bo); |
56 | call_rcu(head: &list->rhead, func: amdgpu_bo_list_free_rcu); |
57 | } |
58 | |
59 | static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b) |
60 | { |
61 | const struct amdgpu_bo_list_entry *a = _a, *b = _b; |
62 | |
63 | if (a->priority > b->priority) |
64 | return 1; |
65 | if (a->priority < b->priority) |
66 | return -1; |
67 | return 0; |
68 | } |
69 | |
70 | int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, |
71 | struct drm_amdgpu_bo_list_entry *info, |
72 | size_t num_entries, struct amdgpu_bo_list **result) |
73 | { |
74 | unsigned last_entry = 0, first_userptr = num_entries; |
75 | struct amdgpu_bo_list_entry *array; |
76 | struct amdgpu_bo_list *list; |
77 | uint64_t total_size = 0; |
78 | unsigned i; |
79 | int r; |
80 | |
81 | list = kvzalloc(struct_size(list, entries, num_entries), GFP_KERNEL); |
82 | if (!list) |
83 | return -ENOMEM; |
84 | |
85 | kref_init(kref: &list->refcount); |
86 | |
87 | list->num_entries = num_entries; |
88 | array = list->entries; |
89 | |
90 | for (i = 0; i < num_entries; ++i) { |
91 | struct amdgpu_bo_list_entry *entry; |
92 | struct drm_gem_object *gobj; |
93 | struct amdgpu_bo *bo; |
94 | struct mm_struct *usermm; |
95 | |
96 | gobj = drm_gem_object_lookup(filp, handle: info[i].bo_handle); |
97 | if (!gobj) { |
98 | r = -ENOENT; |
99 | goto error_free; |
100 | } |
101 | |
102 | bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); |
103 | drm_gem_object_put(obj: gobj); |
104 | |
105 | usermm = amdgpu_ttm_tt_get_usermm(ttm: bo->tbo.ttm); |
106 | if (usermm) { |
107 | if (usermm != current->mm) { |
108 | amdgpu_bo_unref(bo: &bo); |
109 | r = -EPERM; |
110 | goto error_free; |
111 | } |
112 | entry = &array[--first_userptr]; |
113 | } else { |
114 | entry = &array[last_entry++]; |
115 | } |
116 | |
117 | entry->priority = min(info[i].bo_priority, |
118 | AMDGPU_BO_LIST_MAX_PRIORITY); |
119 | entry->bo = bo; |
120 | |
121 | if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) |
122 | list->gds_obj = bo; |
123 | if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) |
124 | list->gws_obj = bo; |
125 | if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA) |
126 | list->oa_obj = bo; |
127 | |
128 | total_size += amdgpu_bo_size(bo); |
129 | trace_amdgpu_bo_list_set(list, bo); |
130 | } |
131 | |
132 | list->first_userptr = first_userptr; |
133 | sort(base: array, num: last_entry, size: sizeof(struct amdgpu_bo_list_entry), |
134 | cmp_func: amdgpu_bo_list_entry_cmp, NULL); |
135 | |
136 | trace_amdgpu_cs_bo_status(total_bo: list->num_entries, total_size); |
137 | |
138 | mutex_init(&list->bo_list_mutex); |
139 | *result = list; |
140 | return 0; |
141 | |
142 | error_free: |
143 | for (i = 0; i < last_entry; ++i) |
144 | amdgpu_bo_unref(bo: &array[i].bo); |
145 | for (i = first_userptr; i < num_entries; ++i) |
146 | amdgpu_bo_unref(bo: &array[i].bo); |
147 | kvfree(addr: list); |
148 | return r; |
149 | |
150 | } |
151 | |
152 | static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id) |
153 | { |
154 | struct amdgpu_bo_list *list; |
155 | |
156 | mutex_lock(&fpriv->bo_list_lock); |
157 | list = idr_remove(&fpriv->bo_list_handles, id); |
158 | mutex_unlock(lock: &fpriv->bo_list_lock); |
159 | if (list) |
160 | kref_put(kref: &list->refcount, release: amdgpu_bo_list_free); |
161 | } |
162 | |
163 | int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, |
164 | struct amdgpu_bo_list **result) |
165 | { |
166 | rcu_read_lock(); |
167 | *result = idr_find(&fpriv->bo_list_handles, id); |
168 | |
169 | if (*result && kref_get_unless_zero(kref: &(*result)->refcount)) { |
170 | rcu_read_unlock(); |
171 | return 0; |
172 | } |
173 | |
174 | rcu_read_unlock(); |
175 | return -ENOENT; |
176 | } |
177 | |
178 | void amdgpu_bo_list_put(struct amdgpu_bo_list *list) |
179 | { |
180 | kref_put(kref: &list->refcount, release: amdgpu_bo_list_free); |
181 | } |
182 | |
183 | int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, |
184 | struct drm_amdgpu_bo_list_entry **info_param) |
185 | { |
186 | const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); |
187 | const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry); |
188 | struct drm_amdgpu_bo_list_entry *info; |
189 | int r; |
190 | |
191 | info = kvmalloc_array(n: in->bo_number, size: info_size, GFP_KERNEL); |
192 | if (!info) |
193 | return -ENOMEM; |
194 | |
195 | /* copy the handle array from userspace to a kernel buffer */ |
196 | r = -EFAULT; |
197 | if (likely(info_size == in->bo_info_size)) { |
198 | unsigned long bytes = in->bo_number * |
199 | in->bo_info_size; |
200 | |
201 | if (copy_from_user(to: info, from: uptr, n: bytes)) |
202 | goto error_free; |
203 | |
204 | } else { |
205 | unsigned long bytes = min(in->bo_info_size, info_size); |
206 | unsigned i; |
207 | |
208 | memset(info, 0, in->bo_number * info_size); |
209 | for (i = 0; i < in->bo_number; ++i) { |
210 | if (copy_from_user(to: &info[i], from: uptr, n: bytes)) |
211 | goto error_free; |
212 | |
213 | uptr += in->bo_info_size; |
214 | } |
215 | } |
216 | |
217 | *info_param = info; |
218 | return 0; |
219 | |
220 | error_free: |
221 | kvfree(addr: info); |
222 | return r; |
223 | } |
224 | |
225 | int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, |
226 | struct drm_file *filp) |
227 | { |
228 | struct amdgpu_device *adev = drm_to_adev(ddev: dev); |
229 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
230 | union drm_amdgpu_bo_list *args = data; |
231 | uint32_t handle = args->in.list_handle; |
232 | struct drm_amdgpu_bo_list_entry *info = NULL; |
233 | struct amdgpu_bo_list *list, *old; |
234 | int r; |
235 | |
236 | r = amdgpu_bo_create_list_entry_array(in: &args->in, info_param: &info); |
237 | if (r) |
238 | return r; |
239 | |
240 | switch (args->in.operation) { |
241 | case AMDGPU_BO_LIST_OP_CREATE: |
242 | r = amdgpu_bo_list_create(adev, filp, info, num_entries: args->in.bo_number, |
243 | result: &list); |
244 | if (r) |
245 | goto error_free; |
246 | |
247 | mutex_lock(&fpriv->bo_list_lock); |
248 | r = idr_alloc(&fpriv->bo_list_handles, ptr: list, start: 1, end: 0, GFP_KERNEL); |
249 | mutex_unlock(lock: &fpriv->bo_list_lock); |
250 | if (r < 0) { |
251 | goto error_put_list; |
252 | } |
253 | |
254 | handle = r; |
255 | break; |
256 | |
257 | case AMDGPU_BO_LIST_OP_DESTROY: |
258 | amdgpu_bo_list_destroy(fpriv, id: handle); |
259 | handle = 0; |
260 | break; |
261 | |
262 | case AMDGPU_BO_LIST_OP_UPDATE: |
263 | r = amdgpu_bo_list_create(adev, filp, info, num_entries: args->in.bo_number, |
264 | result: &list); |
265 | if (r) |
266 | goto error_free; |
267 | |
268 | mutex_lock(&fpriv->bo_list_lock); |
269 | old = idr_replace(&fpriv->bo_list_handles, list, id: handle); |
270 | mutex_unlock(lock: &fpriv->bo_list_lock); |
271 | |
272 | if (IS_ERR(ptr: old)) { |
273 | r = PTR_ERR(ptr: old); |
274 | goto error_put_list; |
275 | } |
276 | |
277 | amdgpu_bo_list_put(list: old); |
278 | break; |
279 | |
280 | default: |
281 | r = -EINVAL; |
282 | goto error_free; |
283 | } |
284 | |
285 | memset(args, 0, sizeof(*args)); |
286 | args->out.list_handle = handle; |
287 | kvfree(addr: info); |
288 | |
289 | return 0; |
290 | |
291 | error_put_list: |
292 | amdgpu_bo_list_put(list); |
293 | |
294 | error_free: |
295 | kvfree(addr: info); |
296 | return r; |
297 | } |
298 | |