// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

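/*
 * Maximum number of times eviction of a resource may transiently fail
 * before the calling operation gives up and returns an error.
 */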
#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;
	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(gbo->tbo.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

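	/*
	 * Resources attached to a mob are kept in an rb-tree keyed on the
	 * resource's offset into the mob's guest memory, so that resources
	 * intersecting a memory range can be found quickly (see
	 * vmw_resources_clean()).
	 */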
	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->guest_memory_offset < this->guest_memory_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &gbo->res_tree);

	vmw_bo_prio_add(gbo, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;

	dma_resv_assert_held(gbo->tbo.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &gbo->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(gbo, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
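	/*
	 * If guest memory backing is still present, the buffer must be
	 * reserved before the resource can be unbound from it; the unbind
	 * callbacks expect a reserved buffer.
	 */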
	if (res->guest_memory_bo) {
		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->guest_memory_size = 0;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->guest_memory_bo);
		ttm_bo_unreserve(bo);
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

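	/*
	 * Preload the idr while sleeping is still allowed; the allocation
	 * itself runs under the resource spinlock and must therefore be
	 * atomic (GFP_NOWAIT).
	 */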
	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->guest_memory_bo = NULL;
	res->guest_memory_offset = 0;
	res->guest_memory_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(!base))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/*
 * Helper function that looks up either a surface or a bo.
 *
 * The pointers pointed at by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct drm_file *filp,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_bo **out_buf)
{
	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

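	/* No surface with this handle: fall back to a buffer object lookup. */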
	*out_surf = NULL;
	ret = vmw_user_bo_lookup(filp, handle, out_buf);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 *
 * @res: The resource for which to allocate a gbo buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->guest_memory_size);
	struct vmw_bo *gbo;
	struct vmw_bo_params bo_params = {
		.domain = res->func->domain,
		.busy_domain = res->func->busy_domain,
		.bo_type = ttm_bo_type_device,
		.size = res->guest_memory_size,
		.pin = false
	};
	int ret;

	if (likely(res->guest_memory_bo)) {
		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
		return 0;
	}

	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->guest_memory_bo = gbo;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 * @dirtying: Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

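	/*
	 * Bind if there is backup data to bind: either the resource is
	 * guest-backed but not yet attached to its mob, or it is not
	 * guest-backed and a backup buffer is present.
	 */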
	if (func->bind &&
	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
	      val_buf->bo) ||
	     (!func->needs_guest_memory && val_buf->bo))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_guest_memory)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->guest_memory_bo->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->guest_memory_bo->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->guest_memory_offset + res->guest_memory_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: When changing dirty status indicates the new status.
 * @switch_guest_memory: Guest memory buffer has been switched.
 * @new_guest_memory_bo: Pointer to new guest memory buffer if command
 * submission switched. May be NULL.
 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_guest_memory,
			    struct vmw_bo *new_guest_memory_bo,
			    unsigned long new_guest_memory_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

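	/* Still on an LRU list means the resource is already unreserved. */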
	if (!list_empty(&res->lru_head))
		return;

	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
		if (res->guest_memory_bo) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			vmw_user_bo_unref(&res->guest_memory_bo);
		}

		if (new_guest_memory_bo) {
			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->guest_memory_bo = NULL;
		}
	} else if (switch_guest_memory && res->coherent) {
		vmw_bo_dirty_release(res->guest_memory_bo);
	}

	if (switch_guest_memory)
		res->guest_memory_offset = new_guest_memory_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate one, reserve and validate it.
 *
 * @ticket: The ww acquire context to use, or NULL if trylocking.
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool guest_memory_dirty = false;
	int ret;

	if (unlikely(!res->guest_memory_bo)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->guest_memory_bo->tbo);
	val_buf->bo = &res->guest_memory_bo->tbo;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

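	/*
	 * A guest-backed resource not currently attached to its mob carries
	 * no device state in the buffer, so its placement need not be
	 * validated here.
	 */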
	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
		return 0;

	guest_memory_dirty = res->guest_memory_dirty;
	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
			     res->func->busy_domain);
	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
			      &res->guest_memory_bo->placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (guest_memory_dirty)
		vmw_user_bo_unref(&res->guest_memory_bo);

	return ret;
}

/*
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @interruptible: Whether to perform any waits interruptibly.
 * @no_guest_memory: Do not allocate a guest memory buffer even if the
 * resource needs one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a guest memory buffer is present for guest-backed resources.
 * However, the buffer may not be bound to the resource at this
 * point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_guest_memory)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
	    !no_guest_memory) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a guest memory buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->guest_memory_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * guest memory buffer
 *
 * @ticket: The ww acquire ctx used for reservation.
 * @val_buf: Guest memory buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @ticket: The ww acquire ticket to use, or NULL if trylocking.
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
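	/*
	 * After destroy, the guest memory buffer holds the only copy of the
	 * resource contents, so mark it dirty for re-upload on the next bind.
	 */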
	ret = func->destroy(res);
	res->guest_memory_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any guest memory buffer pointed to by
 * @res->guest_memory_bo will be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->guest_memory_bo)
		val_buf.bo = &res->guest_memory_bo->tbo;
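	/*
	 * Retry while validation fails with -EBUSY, evicting one LRU
	 * resource of the same type per iteration. Transient eviction
	 * failures are tolerated up to VMW_RES_EVICT_ERR_COUNT times.
	 */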
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->tbo,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->tbo.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->guest_memory_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->tbo, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_bo *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	mutex_lock(&dev_priv->binding_mutex);

	/* If BO is being moved from MOB to system memory */
	if (old_mem &&
	    new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = to_vmw_bo(&bo->base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else {
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_guest_memory;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/*
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to perform waits interruptibly.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_bo *vbo = NULL;

		if (res->guest_memory_bo) {
			vbo = res->guest_memory_bo;

			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->tbo.pin_count) {
				vmw_bo_placement_set(vbo,
						     res->func->domain,
						     res->func->busy_domain);
				ret = ttm_bo_validate
					(&vbo->tbo,
					 &vbo->placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->tbo);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->tbo);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->guest_memory_bo) {
		struct vmw_bo *vbo = res->guest_memory_bo;

		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->tbo);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->guest_memory_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing guest_memory_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->guest_memory_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->tbo;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
	}

	return 0;
}