// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller, which can then be used on the address_space of the drm-device. It
 * takes care not to overlap regions, to size them appropriately, and not to
 * confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. However, drm_mm
 * is highly optimized for alloc/free calls, not lookups. Hence, we use an
 * rb-tree to speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
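 *
 * A minimal sketch of the per-object flow; ``obj``, its ``vma_node`` member,
 * ``num_pages`` and ``file_priv`` are illustrative names, not part of this
 * API::
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->num_pages);
 *     if (!ret)
 *         ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 *
 *     // user-space can now mmap() the object at
 *     // drm_vma_node_offset_addr(&obj->vma_node)
 *
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);
 *     drm_vma_offset_remove(mgr, &obj->vma_node);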
 */

/**
 * drm_vma_offset_manager_init() - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are left
 * to the caller. While calling into the vma-manager, a given node must always
 * be guaranteed to be referenced.
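 *
 * A minimal sketch, assuming the manager covers the mmap-offset window that
 * include/drm/drm_vma_manager.h reserves for DRM files::
 *
 *     drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
 *                                 DRM_FILE_PAGE_OFFSET_SIZE);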
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given range. That is, @start may point somewhere into a valid
 * region and the matching node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that the lookup lock must be acquired with drm_vma_offset_lock_lookup()
 * before calling this function. This can then be used to implement weakly
 * referenced lookups using kref_get_unless_zero().
 *
 * Example (``struct obj``, its ``vma_node`` member and its ``refcount`` are
 * placeholders for the embedding driver object)::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(&container_of(node, struct obj,
 *                                            vma_node)->refcount);
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove(); no cleanup is required in either case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
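 *
 * A minimal sketch, converting a byte-sized object to pages; ``obj``,
 * ``args`` and their members are illustrative::
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *                              DIV_ROUND_UP(obj->size, PAGE_SIZE));
 *     if (ret)
 *         return ret;
 *     args->offset = drm_vma_node_offset_addr(&obj->vma_node);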
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

static int vma_node_allow(struct drm_vma_offset_node *node,
			  struct drm_file *tag, bool ref_counted)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			if (ref_counted)
				entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}

/**
 * drm_vma_node_allow() - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
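 *
 * A minimal sketch of the typical pairing, e.g. in a driver's open and close
 * hooks; ``obj`` and ``file_priv`` are illustrative::
 *
 *     ret = drm_vma_node_allow(&obj->vma_node, file_priv);   // on open
 *     ...
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);        // on close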
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	return vma_node_allow(node, tag, true);
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_allow_once() - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * Unlike drm_vma_node_allow(), this is not ref-counted; hence
 * drm_vma_node_revoke() should be called at most once after this.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	return vma_node_allow(node, tag, false);
}
EXPORT_SYMBOL(drm_vma_node_allow_once);

/**
 * drm_vma_node_revoke() - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed() - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node to check whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
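 *
 * A minimal sketch of an mmap access check; ``priv`` standing in for the
 * caller's &struct drm_file is illustrative::
 *
 *     if (!drm_vma_node_is_allowed(node, priv))
 *         return -EACCES;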
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);