1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
2 | /************************************************************************** |
3 | * |
4 | * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA |
5 | * All Rights Reserved. |
6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * copy of this software and associated documentation files (the |
9 | * "Software"), to deal in the Software without restriction, including |
10 | * without limitation the rights to use, copy, modify, merge, publish, |
11 | * distribute, sub license, and/or sell copies of the Software, and to |
12 | * permit persons to whom the Software is furnished to do so, subject to |
13 | * the following conditions: |
14 | * |
15 | * The above copyright notice and this permission notice (including the |
16 | * next paragraph) shall be included in all copies or substantial portions |
17 | * of the Software. |
18 | * |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
26 | * |
27 | **************************************************************************/ |
28 | #include "vmwgfx_bo.h" |
29 | #include "vmwgfx_drv.h" |
30 | #include "vmwgfx_resource_priv.h" |
31 | #include "vmwgfx_validation.h" |
32 | |
33 | #include <linux/slab.h> |
34 | |
35 | |
36 | #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) |
37 | |
38 | /** |
39 | * struct vmw_validation_bo_node - Buffer object validation metadata. |
40 | * @base: Metadata used for TTM reservation- and validation. |
41 | * @hash: A hash entry used for the duplicate detection hash table. |
42 | * @coherent_count: If switching backup buffers, number of new coherent |
43 | * resources that will have this buffer as a backup buffer. |
44 | * |
 * These structures are allocated and freed in large numbers, so space
 * conservation is desired.
47 | */ |
48 | struct vmw_validation_bo_node { |
49 | struct ttm_validate_buffer base; |
50 | struct vmwgfx_hash_item hash; |
51 | unsigned int coherent_count; |
52 | }; |
53 | /** |
54 | * struct vmw_validation_res_node - Resource validation metadata. |
55 | * @head: List head for the resource validation list. |
56 | * @hash: A hash entry used for the duplicate detection hash table. |
57 | * @res: Reference counted resource pointer. |
58 | * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer |
59 | * to be assigned to a resource. |
60 | * @new_guest_memory_offset: Offset into the new backup mob for resources |
61 | * that can share MOBs. |
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation;
 * the command stream provides a MOB bind operation.
64 | * @switching_guest_memory_bo: The validation process is switching backup MOB. |
65 | * @first_usage: True iff the resource has been seen only once in the current |
66 | * validation batch. |
67 | * @reserved: Whether the resource is currently reserved by this process. |
68 | * @dirty_set: Change dirty status of the resource. |
69 | * @dirty: Dirty information VMW_RES_DIRTY_XX. |
70 | * @private: Optionally additional memory for caller-private data. |
71 | * |
72 | * Bit fields are used since these structures are allocated and freed in |
73 | * large numbers and space conservation is desired. |
74 | */ |
75 | struct vmw_validation_res_node { |
76 | struct list_head head; |
77 | struct vmwgfx_hash_item hash; |
78 | struct vmw_resource *res; |
79 | struct vmw_bo *new_guest_memory_bo; |
80 | unsigned long new_guest_memory_offset; |
81 | u32 no_buffer_needed : 1; |
82 | u32 switching_guest_memory_bo : 1; |
83 | u32 first_usage : 1; |
84 | u32 reserved : 1; |
85 | u32 dirty : 1; |
86 | u32 dirty_set : 1; |
87 | unsigned long private[]; |
88 | }; |
89 | |
90 | /** |
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
95 | * |
96 | * The memory allocated may not exceed PAGE_SIZE, and the returned |
97 | * address is aligned to sizeof(long). All memory allocated this way is |
98 | * reclaimed after validation when calling any of the exported functions: |
99 | * vmw_validation_unref_lists() |
100 | * vmw_validation_revert() |
101 | * vmw_validation_done() |
102 | * |
103 | * Return: Pointer to the allocated memory on success. NULL on failure. |
104 | */ |
105 | void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, |
106 | unsigned int size) |
107 | { |
108 | void *addr; |
109 | |
	size = vmw_validation_align(size);
111 | if (size > PAGE_SIZE) |
112 | return NULL; |
113 | |
114 | if (ctx->mem_size_left < size) { |
115 | struct page *page; |
116 | |
117 | if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) { |
118 | ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN; |
119 | ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN; |
120 | } |
121 | |
122 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
123 | if (!page) |
124 | return NULL; |
125 | |
126 | if (ctx->vm) |
127 | ctx->vm_size_left -= PAGE_SIZE; |
128 | |
		list_add_tail(&page->lru, &ctx->page_list);
130 | ctx->page_address = page_address(page); |
131 | ctx->mem_size_left = PAGE_SIZE; |
132 | } |
133 | |
134 | addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left)); |
135 | ctx->mem_size_left -= size; |
136 | |
137 | return addr; |
138 | } |
139 | |
140 | /** |
141 | * vmw_validation_mem_free - Free all memory allocated using |
142 | * vmw_validation_mem_alloc() |
143 | * @ctx: The validation context |
144 | * |
145 | * All memory previously allocated for this context using |
146 | * vmw_validation_mem_alloc() is freed. |
147 | */ |
148 | static void vmw_validation_mem_free(struct vmw_validation_context *ctx) |
149 | { |
150 | struct page *entry, *next; |
151 | |
152 | list_for_each_entry_safe(entry, next, &ctx->page_list, lru) { |
		list_del_init(&entry->lru);
154 | __free_page(entry); |
155 | } |
156 | |
157 | ctx->mem_size_left = 0; |
158 | if (ctx->vm && ctx->total_mem) { |
159 | ctx->total_mem = 0; |
160 | ctx->vm_size_left = 0; |
161 | } |
162 | } |
163 | |
164 | /** |
165 | * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the |
166 | * validation context's lists. |
167 | * @ctx: The validation context to search. |
168 | * @vbo: The buffer object to search for. |
169 | * |
170 | * Return: Pointer to the struct vmw_validation_bo_node referencing the |
171 | * duplicate, or NULL if none found. |
172 | */ |
173 | static struct vmw_validation_bo_node * |
174 | vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, |
175 | struct vmw_bo *vbo) |
176 | { |
177 | struct vmw_validation_bo_node *bo_node = NULL; |
178 | |
179 | if (!ctx->merge_dups) |
180 | return NULL; |
181 | |
182 | if (ctx->sw_context) { |
183 | struct vmwgfx_hash_item *hash; |
184 | unsigned long key = (unsigned long) vbo; |
185 | |
186 | hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) { |
187 | if (hash->key == key) { |
188 | bo_node = container_of(hash, typeof(*bo_node), hash); |
189 | break; |
190 | } |
191 | } |
192 | } else { |
193 | struct vmw_validation_bo_node *entry; |
194 | |
195 | list_for_each_entry(entry, &ctx->bo_list, base.head) { |
196 | if (entry->base.bo == &vbo->tbo) { |
197 | bo_node = entry; |
198 | break; |
199 | } |
200 | } |
201 | } |
202 | |
203 | return bo_node; |
204 | } |
205 | |
206 | /** |
207 | * vmw_validation_find_res_dup - Find a duplicate resource entry in the |
208 | * validation context's lists. |
209 | * @ctx: The validation context to search. |
210 | * @res: Reference counted resource pointer. |
211 | * |
 * Return: Pointer to the struct vmw_validation_res_node referencing the
213 | * duplicate, or NULL if none found. |
214 | */ |
215 | static struct vmw_validation_res_node * |
216 | vmw_validation_find_res_dup(struct vmw_validation_context *ctx, |
217 | struct vmw_resource *res) |
218 | { |
219 | struct vmw_validation_res_node *res_node = NULL; |
220 | |
221 | if (!ctx->merge_dups) |
222 | return NULL; |
223 | |
224 | if (ctx->sw_context) { |
225 | struct vmwgfx_hash_item *hash; |
226 | unsigned long key = (unsigned long) res; |
227 | |
228 | hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) { |
229 | if (hash->key == key) { |
230 | res_node = container_of(hash, typeof(*res_node), hash); |
231 | break; |
232 | } |
233 | } |
234 | } else { |
235 | struct vmw_validation_res_node *entry; |
236 | |
237 | list_for_each_entry(entry, &ctx->resource_ctx_list, head) { |
238 | if (entry->res == res) { |
239 | res_node = entry; |
240 | goto out; |
241 | } |
242 | } |
243 | |
244 | list_for_each_entry(entry, &ctx->resource_list, head) { |
245 | if (entry->res == res) { |
246 | res_node = entry; |
247 | break; |
248 | } |
249 | } |
250 | |
251 | } |
252 | out: |
253 | return res_node; |
254 | } |
255 | |
256 | /** |
257 | * vmw_validation_add_bo - Add a buffer object to the validation context. |
258 | * @ctx: The validation context. |
259 | * @vbo: The buffer object. |
260 | * |
261 | * Return: Zero on success, negative error code otherwise. |
262 | */ |
263 | int vmw_validation_add_bo(struct vmw_validation_context *ctx, |
264 | struct vmw_bo *vbo) |
265 | { |
266 | struct vmw_validation_bo_node *bo_node; |
267 | |
268 | bo_node = vmw_validation_find_bo_dup(ctx, vbo); |
269 | if (!bo_node) { |
270 | struct ttm_validate_buffer *val_buf; |
271 | |
		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
273 | if (!bo_node) |
274 | return -ENOMEM; |
275 | |
276 | if (ctx->sw_context) { |
277 | bo_node->hash.key = (unsigned long) vbo; |
278 | hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head, |
279 | bo_node->hash.key); |
280 | } |
281 | val_buf = &bo_node->base; |
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
283 | if (!val_buf->bo) |
284 | return -ESRCH; |
285 | val_buf->num_shared = 0; |
		list_add_tail(&val_buf->head, &ctx->bo_list);
287 | } |
288 | |
289 | return 0; |
290 | } |
291 | |
292 | /** |
293 | * vmw_validation_add_resource - Add a resource to the validation context. |
294 | * @ctx: The validation context. |
295 | * @res: The resource. |
296 | * @priv_size: Size of private, additional metadata. |
297 | * @dirty: Whether to change dirty status. |
298 | * @p_node: Output pointer of additional metadata address. |
299 | * @first_usage: Whether this was the first time this resource was seen. |
300 | * |
301 | * Return: Zero on success, negative error code otherwise. |
302 | */ |
303 | int vmw_validation_add_resource(struct vmw_validation_context *ctx, |
304 | struct vmw_resource *res, |
305 | size_t priv_size, |
306 | u32 dirty, |
307 | void **p_node, |
308 | bool *first_usage) |
309 | { |
310 | struct vmw_validation_res_node *node; |
311 | |
312 | node = vmw_validation_find_res_dup(ctx, res); |
313 | if (node) { |
314 | node->first_usage = 0; |
315 | goto out_fill; |
316 | } |
317 | |
	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
319 | if (!node) { |
320 | VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n" ); |
321 | return -ENOMEM; |
322 | } |
323 | |
324 | if (ctx->sw_context) { |
325 | node->hash.key = (unsigned long) res; |
326 | hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); |
327 | } |
328 | node->res = vmw_resource_reference_unless_doomed(res); |
329 | if (!node->res) |
330 | return -ESRCH; |
331 | |
332 | node->first_usage = 1; |
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}
349 | |
350 | out_fill: |
351 | if (dirty) { |
352 | node->dirty_set = 1; |
353 | /* Overwriting previous information here is intentional! */ |
354 | node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0; |
355 | } |
356 | if (first_usage) |
357 | *first_usage = node->first_usage; |
358 | if (p_node) |
359 | *p_node = &node->private; |
360 | |
361 | return 0; |
362 | } |
363 | |
364 | /** |
365 | * vmw_validation_res_set_dirty - Register a resource dirty set or clear during |
366 | * validation. |
367 | * @ctx: The validation context. |
368 | * @val_private: The additional meta-data pointer returned when the |
369 | * resource was registered with the validation context. Used to identify |
370 | * the resource. |
371 | * @dirty: Dirty information VMW_RES_DIRTY_XX |
372 | */ |
373 | void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, |
374 | void *val_private, u32 dirty) |
375 | { |
376 | struct vmw_validation_res_node *val; |
377 | |
378 | if (!dirty) |
379 | return; |
380 | |
381 | val = container_of(val_private, typeof(*val), private); |
382 | val->dirty_set = 1; |
383 | /* Overwriting previous information here is intentional! */ |
384 | val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0; |
385 | } |
386 | |
387 | /** |
388 | * vmw_validation_res_switch_backup - Register a backup MOB switch during |
389 | * validation. |
390 | * @ctx: The validation context. |
391 | * @val_private: The additional meta-data pointer returned when the |
392 | * resource was registered with the validation context. Used to identify |
393 | * the resource. |
394 | * @vbo: The new backup buffer object MOB. This buffer object needs to have |
395 | * already been registered with the validation context. |
396 | * @guest_memory_offset: Offset into the new backup MOB. |
397 | */ |
398 | void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, |
399 | void *val_private, |
400 | struct vmw_bo *vbo, |
401 | unsigned long guest_memory_offset) |
402 | { |
403 | struct vmw_validation_res_node *val; |
404 | |
405 | val = container_of(val_private, typeof(*val), private); |
406 | |
407 | val->switching_guest_memory_bo = 1; |
408 | if (val->first_usage) |
409 | val->no_buffer_needed = 1; |
410 | |
411 | val->new_guest_memory_bo = vbo; |
412 | val->new_guest_memory_offset = guest_memory_offset; |
413 | } |
414 | |
415 | /** |
416 | * vmw_validation_res_reserve - Reserve all resources registered with this |
417 | * validation context. |
418 | * @ctx: The validation context. |
419 | * @intr: Use interruptible waits when possible. |
420 | * |
421 | * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error |
422 | * code on failure. |
423 | */ |
424 | int vmw_validation_res_reserve(struct vmw_validation_context *ctx, |
425 | bool intr) |
426 | { |
427 | struct vmw_validation_res_node *val; |
428 | int ret = 0; |
429 | |
	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
431 | |
432 | list_for_each_entry(val, &ctx->resource_list, head) { |
433 | struct vmw_resource *res = val->res; |
434 | |
		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
436 | if (ret) |
437 | goto out_unreserve; |
438 | |
439 | val->reserved = 1; |
440 | if (res->guest_memory_bo) { |
441 | struct vmw_bo *vbo = res->guest_memory_bo; |
442 | |
			vmw_bo_placement_set(vbo,
					     res->func->domain,
					     res->func->busy_domain);
446 | ret = vmw_validation_add_bo(ctx, vbo); |
447 | if (ret) |
448 | goto out_unreserve; |
449 | } |
450 | |
451 | if (val->switching_guest_memory_bo && val->new_guest_memory_bo && |
452 | res->coherent) { |
453 | struct vmw_validation_bo_node *bo_node = |
				vmw_validation_find_bo_dup(ctx,
							   val->new_guest_memory_bo);
456 | |
457 | if (WARN_ON(!bo_node)) { |
458 | ret = -EINVAL; |
459 | goto out_unreserve; |
460 | } |
461 | bo_node->coherent_count++; |
462 | } |
463 | } |
464 | |
465 | return 0; |
466 | |
467 | out_unreserve: |
	vmw_validation_res_unreserve(ctx, true);
469 | return ret; |
470 | } |
471 | |
472 | /** |
473 | * vmw_validation_res_unreserve - Unreserve all reserved resources |
474 | * registered with this validation context. |
475 | * @ctx: The validation context. |
 * @backoff: Whether this is a backoff- or a commit-type operation. This
477 | * is used to determine whether to switch backup MOBs or not. |
478 | */ |
479 | void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, |
480 | bool backoff) |
481 | { |
482 | struct vmw_validation_res_node *val; |
483 | |
	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_guest_memory_bo,
						       val->new_guest_memory_bo,
						       val->new_guest_memory_offset);
		}
502 | } |
503 | |
504 | /** |
505 | * vmw_validation_bo_validate_single - Validate a single buffer object. |
506 | * @bo: The TTM buffer object base. |
507 | * @interruptible: Whether to perform waits interruptible if possible. |
508 | * |
509 | * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error |
510 | * code on failure. |
511 | */ |
512 | static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, |
513 | bool interruptible) |
514 | { |
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
516 | struct ttm_operation_ctx ctx = { |
517 | .interruptible = interruptible, |
518 | .no_wait_gpu = false |
519 | }; |
520 | int ret; |
521 | |
	if (atomic_read(&vbo->cpu_writers))
523 | return -EBUSY; |
524 | |
525 | if (vbo->tbo.pin_count > 0) |
526 | return 0; |
527 | |
	ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
529 | if (ret == 0 || ret == -ERESTARTSYS) |
530 | return ret; |
531 | |
532 | /* |
533 | * If that failed, try again, this time evicting |
534 | * previous contents. |
535 | */ |
536 | ctx.allow_res_evict = true; |
537 | |
	return ttm_bo_validate(bo, &vbo->placement, &ctx);
539 | } |
540 | |
541 | /** |
542 | * vmw_validation_bo_validate - Validate all buffer objects registered with |
543 | * the validation context. |
544 | * @ctx: The validation context. |
545 | * @intr: Whether to perform waits interruptible if possible. |
546 | * |
547 | * Return: Zero on success, -ERESTARTSYS if interrupted, |
548 | * negative error code on failure. |
549 | */ |
550 | int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) |
551 | { |
552 | struct vmw_validation_bo_node *entry; |
553 | int ret; |
554 | |
555 | list_for_each_entry(entry, &ctx->bo_list, base.head) { |
		struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
557 | |
		ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
559 | |
560 | if (ret) |
561 | return ret; |
562 | |
563 | /* |
564 | * Rather than having the resource code allocating the bo |
565 | * dirty tracker in resource_unreserve() where we can't fail, |
566 | * Do it here when validating the buffer object. |
567 | */ |
568 | if (entry->coherent_count) { |
569 | unsigned int coherent_count = entry->coherent_count; |
570 | |
571 | while (coherent_count) { |
572 | ret = vmw_bo_dirty_add(vbo); |
573 | if (ret) |
574 | return ret; |
575 | |
576 | coherent_count--; |
577 | } |
578 | entry->coherent_count -= coherent_count; |
579 | } |
580 | |
581 | if (vbo->dirty) |
582 | vmw_bo_dirty_scan(vbo); |
583 | } |
584 | return 0; |
585 | } |
586 | |
587 | /** |
588 | * vmw_validation_res_validate - Validate all resources registered with the |
589 | * validation context. |
590 | * @ctx: The validation context. |
591 | * @intr: Whether to perform waits interruptible if possible. |
592 | * |
593 | * Before this function is called, all resource backup buffers must have |
594 | * been validated. |
595 | * |
596 | * Return: Zero on success, -ERESTARTSYS if interrupted, |
597 | * negative error code on failure. |
598 | */ |
599 | int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) |
600 | { |
601 | struct vmw_validation_res_node *val; |
602 | int ret; |
603 | |
604 | list_for_each_entry(val, &ctx->resource_list, head) { |
605 | struct vmw_resource *res = val->res; |
606 | struct vmw_bo *backup = res->guest_memory_bo; |
607 | |
		ret = vmw_resource_validate(res, intr, val->dirty_set &&
609 | val->dirty); |
610 | if (ret) { |
611 | if (ret != -ERESTARTSYS) |
612 | DRM_ERROR("Failed to validate resource.\n" ); |
613 | return ret; |
614 | } |
615 | |
616 | /* Check if the resource switched backup buffer */ |
617 | if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) { |
618 | struct vmw_bo *vbo = res->guest_memory_bo; |
619 | |
			vmw_bo_placement_set(vbo, res->func->domain,
					     res->func->busy_domain);
622 | ret = vmw_validation_add_bo(ctx, vbo); |
623 | if (ret) |
624 | return ret; |
625 | } |
626 | } |
627 | return 0; |
628 | } |
629 | |
630 | /** |
631 | * vmw_validation_drop_ht - Reset the hash table used for duplicate finding |
632 | * and unregister it from this validation context. |
633 | * @ctx: The validation context. |
634 | * |
635 | * The hash table used for duplicate finding is an expensive resource and |
636 | * may be protected by mutexes that may cause deadlocks during resource |
637 | * unreferencing if held. After resource- and buffer object registering, |
638 | * there is no longer any use for this hash table, so allow freeing it |
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
641 | */ |
642 | void vmw_validation_drop_ht(struct vmw_validation_context *ctx) |
643 | { |
644 | struct vmw_validation_bo_node *entry; |
645 | struct vmw_validation_res_node *val; |
646 | |
647 | if (!ctx->sw_context) |
648 | return; |
649 | |
650 | list_for_each_entry(entry, &ctx->bo_list, base.head) |
		hash_del_rcu(&entry->hash.head);
652 | |
653 | list_for_each_entry(val, &ctx->resource_list, head) |
		hash_del_rcu(&val->hash.head);
655 | |
656 | list_for_each_entry(val, &ctx->resource_ctx_list, head) |
		hash_del_rcu(&val->hash.head);
658 | |
659 | ctx->sw_context = NULL; |
660 | } |
661 | |
662 | /** |
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
665 | * @ctx: The validation context. |
666 | * |
667 | * Note that this function may cause buffer object- and resource destructors |
668 | * to be invoked. |
669 | */ |
670 | void vmw_validation_unref_lists(struct vmw_validation_context *ctx) |
671 | { |
672 | struct vmw_validation_bo_node *entry; |
673 | struct vmw_validation_res_node *val; |
674 | |
675 | list_for_each_entry(entry, &ctx->bo_list, base.head) { |
		ttm_bo_put(entry->base.bo);
677 | entry->base.bo = NULL; |
678 | } |
679 | |
	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
681 | list_for_each_entry(val, &ctx->resource_list, head) |
		vmw_resource_unreference(&val->res);
683 | |
684 | /* |
685 | * No need to detach each list entry since they are all freed with |
686 | * vmw_validation_free_mem. Just make the inaccessible. |
687 | */ |
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);
690 | |
691 | vmw_validation_mem_free(ctx); |
692 | } |
693 | |
694 | /** |
695 | * vmw_validation_prepare - Prepare a validation context for command |
696 | * submission. |
697 | * @ctx: The validation context. |
698 | * @mutex: The mutex used to protect resource reservation. |
699 | * @intr: Whether to perform waits interruptible if possible. |
700 | * |
701 | * Note that the single reservation mutex @mutex is an unfortunate |
702 | * construct. Ideally resource reservation should be moved to per-resource |
703 | * ww_mutexes. |
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
706 | * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code |
707 | * on error. |
708 | */ |
709 | int vmw_validation_prepare(struct vmw_validation_context *ctx, |
710 | struct mutex *mutex, |
711 | bool intr) |
712 | { |
713 | int ret = 0; |
714 | |
715 | if (mutex) { |
716 | if (intr) |
717 | ret = mutex_lock_interruptible(mutex); |
718 | else |
719 | mutex_lock(mutex); |
720 | if (ret) |
721 | return -ERESTARTSYS; |
722 | } |
723 | |
724 | ctx->res_mutex = mutex; |
725 | ret = vmw_validation_res_reserve(ctx, intr); |
726 | if (ret) |
727 | goto out_no_res_reserve; |
728 | |
729 | ret = vmw_validation_bo_reserve(ctx, intr); |
730 | if (ret) |
731 | goto out_no_bo_reserve; |
732 | |
733 | ret = vmw_validation_bo_validate(ctx, intr); |
734 | if (ret) |
735 | goto out_no_validate; |
736 | |
737 | ret = vmw_validation_res_validate(ctx, intr); |
738 | if (ret) |
739 | goto out_no_validate; |
740 | |
741 | return 0; |
742 | |
743 | out_no_validate: |
744 | vmw_validation_bo_backoff(ctx); |
745 | out_no_bo_reserve: |
	vmw_validation_res_unreserve(ctx, true);
747 | out_no_res_reserve: |
748 | if (mutex) |
		mutex_unlock(mutex);
750 | |
751 | return ret; |
752 | } |
753 | |
754 | /** |
755 | * vmw_validation_revert - Revert validation actions if command submission |
756 | * failed. |
757 | * |
758 | * @ctx: The validation context. |
759 | * |
760 | * The caller still needs to unref resources after a call to this function. |
761 | */ |
762 | void vmw_validation_revert(struct vmw_validation_context *ctx) |
763 | { |
764 | vmw_validation_bo_backoff(ctx); |
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
768 | vmw_validation_unref_lists(ctx); |
769 | } |
770 | |
771 | /** |
772 | * vmw_validation_done - Commit validation actions after command submission |
773 | * success. |
774 | * @ctx: The validation context. |
775 | * @fence: Fence with which to fence all buffer objects taking part in the |
776 | * command submission. |
777 | * |
778 | * The caller does NOT need to unref resources after a call to this function. |
779 | */ |
780 | void vmw_validation_done(struct vmw_validation_context *ctx, |
781 | struct vmw_fence_obj *fence) |
782 | { |
783 | vmw_validation_bo_fence(ctx, fence); |
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
787 | vmw_validation_unref_lists(ctx); |
788 | } |
789 | |
790 | /** |
791 | * vmw_validation_preload_bo - Preload the validation memory allocator for a |
792 | * call to vmw_validation_add_bo(). |
793 | * @ctx: Pointer to the validation context. |
794 | * |
795 | * Iff this function returns successfully, the next call to |
796 | * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal |
797 | * but voids the guarantee. |
798 | * |
 * Returns: Zero if successful, %-ENOMEM otherwise.
800 | */ |
801 | int vmw_validation_preload_bo(struct vmw_validation_context *ctx) |
802 | { |
803 | unsigned int size = sizeof(struct vmw_validation_bo_node); |
804 | |
805 | if (!vmw_validation_mem_alloc(ctx, size)) |
806 | return -ENOMEM; |
807 | |
808 | ctx->mem_size_left += size; |
809 | return 0; |
810 | } |
811 | |
812 | /** |
813 | * vmw_validation_preload_res - Preload the validation memory allocator for a |
 * call to vmw_validation_add_resource().
815 | * @ctx: Pointer to the validation context. |
816 | * @size: Size of the validation node extra data. See below. |
817 | * |
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
821 | * |
 * Returns: Zero if successful, %-ENOMEM otherwise.
823 | */ |
824 | int vmw_validation_preload_res(struct vmw_validation_context *ctx, |
825 | unsigned int size) |
826 | { |
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
830 | if (!vmw_validation_mem_alloc(ctx, size)) |
831 | return -ENOMEM; |
832 | |
833 | ctx->mem_size_left += size; |
834 | return 0; |
835 | } |
836 | |
837 | /** |
838 | * vmw_validation_bo_backoff - Unreserve buffer objects registered with a |
839 | * validation context |
840 | * @ctx: The validation context |
841 | * |
842 | * This function unreserves the buffer objects previously reserved using |
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
844 | */ |
845 | void vmw_validation_bo_backoff(struct vmw_validation_context *ctx) |
846 | { |
847 | struct vmw_validation_bo_node *entry; |
848 | |
849 | /* |
850 | * Switching coherent resource backup buffers failed. |
851 | * Release corresponding buffer object dirty trackers. |
852 | */ |
853 | list_for_each_entry(entry, &ctx->bo_list, base.head) { |
854 | if (entry->coherent_count) { |
855 | unsigned int coherent_count = entry->coherent_count; |
			struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
857 | |
858 | while (coherent_count--) |
859 | vmw_bo_dirty_release(vbo); |
860 | } |
861 | } |
862 | |
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
864 | } |
865 | |