1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
2 | /************************************************************************** |
3 | * |
4 | * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
26 | **************************************************************************/ |
27 | #include "vmwgfx_binding.h" |
28 | #include "vmwgfx_bo.h" |
29 | #include "vmwgfx_drv.h" |
30 | #include "vmwgfx_mksstat.h" |
31 | #include "vmwgfx_so.h" |
32 | |
33 | #include <drm/ttm/ttm_bo.h> |
34 | #include <drm/ttm/ttm_placement.h> |
35 | |
36 | #include <linux/sync_file.h> |
37 | #include <linux/hashtable.h> |
38 | |
39 | /* |
40 | * Helper macro to get dx_ctx_node if available otherwise print an error |
41 | * message. This is for use in command verifier function where if dx_ctx_node |
42 | * is not set then command is invalid. |
43 | */ |
44 | #define VMW_GET_CTX_NODE(__sw_context) \ |
45 | ({ \ |
46 | __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \ |
47 | VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \ |
48 | __sw_context->dx_ctx_node; \ |
49 | }); \ |
50 | }) |
51 | |
52 | #define VMW_DECLARE_CMD_VAR(__var, __type) \ |
53 | struct { \ |
SVGA3dCmdHeader header; \
55 | __type body; \ |
56 | } __var |
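
/*
* For illustration, VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy)
* expands to:
*
*	struct {
*		SVGA3dCmdHeader header;
*		SVGA3dCmdSurfaceCopy body;
*	} *cmd;
*
* which overlays a command header and its typed body, so a verifier can
* recover the full command from its header with
* cmd = container_of(header, typeof(*cmd), header).
*/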
57 | |
58 | /** |
59 | * struct vmw_relocation - Buffer object relocation |
60 | * |
61 | * @head: List head for the command submission context's relocation list |
62 | * @vbo: Non ref-counted pointer to buffer object |
63 | * @mob_loc: Pointer to location for mob id to be modified |
64 | * @location: Pointer to location for guest pointer to be modified |
65 | */ |
66 | struct vmw_relocation { |
67 | struct list_head head; |
68 | struct vmw_bo *vbo; |
69 | union { |
70 | SVGAMobId *mob_loc; |
71 | SVGAGuestPtr *location; |
72 | }; |
73 | }; |
74 | |
75 | /** |
76 | * enum vmw_resource_relocation_type - Relocation type for resources |
77 | * |
78 | * @vmw_res_rel_normal: Traditional relocation. The resource id in the |
79 | * command stream is replaced with the actual id after validation. |
80 | * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced |
81 | * with a NOP. |
82 | * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after |
83 | * validation is -1, the command is replaced with a NOP. Otherwise no action. |
84 | * @vmw_res_rel_max: Last value in the enum - used for error checking |
85 | */ |
86 | enum vmw_resource_relocation_type { |
87 | vmw_res_rel_normal, |
88 | vmw_res_rel_nop, |
89 | vmw_res_rel_cond_nop, |
90 | vmw_res_rel_max |
91 | }; |
92 | |
93 | /** |
94 | * struct vmw_resource_relocation - Relocation info for resources |
95 | * |
96 | * @head: List head for the software context's relocation list. |
97 | * @res: Non-ref-counted pointer to the resource. |
* @offset: Byte offset into the command buffer where the id that needs
* fixup is located.
100 | * @rel_type: Type of relocation. |
101 | */ |
102 | struct vmw_resource_relocation { |
103 | struct list_head head; |
104 | const struct vmw_resource *res; |
105 | u32 offset:29; |
106 | enum vmw_resource_relocation_type rel_type:3; |
107 | }; |
108 | |
109 | /** |
110 | * struct vmw_ctx_validation_info - Extra validation metadata for contexts |
111 | * |
112 | * @head: List head of context list |
113 | * @ctx: The context resource |
114 | * @cur: The context's persistent binding state |
115 | * @staged: The binding state changes of this command buffer |
116 | */ |
117 | struct vmw_ctx_validation_info { |
118 | struct list_head head; |
119 | struct vmw_resource *ctx; |
120 | struct vmw_ctx_binding_state *cur; |
121 | struct vmw_ctx_binding_state *staged; |
122 | }; |
123 | |
124 | /** |
125 | * struct vmw_cmd_entry - Describe a command for the verifier |
126 | * |
127 | * @func: Call-back to handle the command. |
128 | * @user_allow: Whether allowed from the execbuf ioctl. |
129 | * @gb_disable: Whether disabled if guest-backed objects are available. |
130 | * @gb_enable: Whether enabled iff guest-backed objects are available. |
131 | * @cmd_name: Name of the command. |
132 | */ |
133 | struct vmw_cmd_entry { |
134 | int (*func) (struct vmw_private *, struct vmw_sw_context *, |
135 | SVGA3dCmdHeader *); |
136 | bool user_allow; |
137 | bool gb_disable; |
138 | bool gb_enable; |
139 | const char *cmd_name; |
140 | }; |
141 | |
142 | #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ |
143 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ |
144 | (_gb_disable), (_gb_enable), #_cmd} |
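
/*
* An illustrative command-table entry, as used in the command table later
* in this file:
*
*	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
*		    true, false, false)
*
* places the entry at index SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE
* and uses the stringified command id as its name.
*/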
145 | |
146 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, |
147 | struct vmw_sw_context *sw_context, |
148 | struct vmw_resource *ctx); |
149 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, |
150 | struct vmw_sw_context *sw_context, |
151 | SVGAMobId *id, |
152 | struct vmw_bo **vmw_bo_p); |
153 | /** |
154 | * vmw_ptr_diff - Compute the offset from a to b in bytes |
155 | * |
156 | * @a: A starting pointer. |
157 | * @b: A pointer offset in the same address space. |
158 | * |
159 | * Returns: The offset in bytes between the two pointers. |
160 | */ |
161 | static size_t vmw_ptr_diff(void *a, void *b) |
162 | { |
163 | return (unsigned long) b - (unsigned long) a; |
164 | } |
165 | |
166 | /** |
167 | * vmw_execbuf_bindings_commit - Commit modified binding state |
168 | * |
169 | * @sw_context: The command submission context |
170 | * @backoff: Whether this is part of the error path and binding state changes |
171 | * should be ignored |
172 | */ |
173 | static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, |
174 | bool backoff) |
175 | { |
176 | struct vmw_ctx_validation_info *entry; |
177 | |
178 | list_for_each_entry(entry, &sw_context->ctx_list, head) { |
179 | if (!backoff) |
vmw_binding_state_commit(entry->cur, entry->staged);
181 | |
182 | if (entry->staged != sw_context->staged_bindings) |
vmw_binding_state_free(entry->staged);
184 | else |
185 | sw_context->staged_bindings_inuse = false; |
186 | } |
187 | |
188 | /* List entries are freed with the validation context */ |
INIT_LIST_HEAD(&sw_context->ctx_list);
190 | } |
191 | |
192 | /** |
193 | * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced |
194 | * |
195 | * @sw_context: The command submission context |
196 | */ |
197 | static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) |
198 | { |
199 | if (sw_context->dx_query_mob) |
vmw_context_bind_dx_query(sw_context->dx_query_ctx,
sw_context->dx_query_mob);
202 | } |
203 | |
204 | /** |
205 | * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to |
206 | * the validate list. |
207 | * |
* @dev_priv: Pointer to the device private struct.
209 | * @sw_context: The command submission context |
210 | * @res: Pointer to the resource |
211 | * @node: The validation node holding the context resource metadata |
212 | */ |
213 | static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, |
214 | struct vmw_sw_context *sw_context, |
215 | struct vmw_resource *res, |
216 | struct vmw_ctx_validation_info *node) |
217 | { |
218 | int ret; |
219 | |
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221 | if (unlikely(ret != 0)) |
222 | goto out_err; |
223 | |
224 | if (!sw_context->staged_bindings) { |
225 | sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv); |
if (IS_ERR(sw_context->staged_bindings)) {
ret = PTR_ERR(sw_context->staged_bindings);
228 | sw_context->staged_bindings = NULL; |
229 | goto out_err; |
230 | } |
231 | } |
232 | |
233 | if (sw_context->staged_bindings_inuse) { |
234 | node->staged = vmw_binding_state_alloc(dev_priv); |
if (IS_ERR(node->staged)) {
ret = PTR_ERR(node->staged);
237 | node->staged = NULL; |
238 | goto out_err; |
239 | } |
240 | } else { |
241 | node->staged = sw_context->staged_bindings; |
242 | sw_context->staged_bindings_inuse = true; |
243 | } |
244 | |
245 | node->ctx = res; |
node->cur = vmw_context_binding_state(res);
list_add_tail(&node->head, &sw_context->ctx_list);
248 | |
249 | return 0; |
250 | |
251 | out_err: |
252 | return ret; |
253 | } |
254 | |
255 | /** |
* vmw_execbuf_res_size - calculate extra size for the resource validation node
257 | * |
258 | * @dev_priv: Pointer to the device private struct. |
259 | * @res_type: The resource type. |
260 | * |
261 | * Guest-backed contexts and DX contexts require extra size to store execbuf |
* private information in the validation node, typically the data structures
* associated with the binding manager.
264 | * |
265 | * Returns: The extra size requirement based on resource type. |
266 | */ |
267 | static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv, |
268 | enum vmw_res_type res_type) |
269 | { |
270 | return (res_type == vmw_res_dx_context || |
271 | (res_type == vmw_res_context && dev_priv->has_mob)) ? |
272 | sizeof(struct vmw_ctx_validation_info) : 0; |
273 | } |
274 | |
275 | /** |
276 | * vmw_execbuf_rcache_update - Update a resource-node cache entry |
277 | * |
278 | * @rcache: Pointer to the entry to update. |
279 | * @res: Pointer to the resource. |
280 | * @private: Pointer to the execbuf-private space in the resource validation |
281 | * node. |
282 | */ |
283 | static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, |
284 | struct vmw_resource *res, |
285 | void *private) |
286 | { |
287 | rcache->res = res; |
288 | rcache->private = private; |
289 | rcache->valid = 1; |
290 | rcache->valid_handle = 0; |
291 | } |
292 | |
293 | enum vmw_val_add_flags { |
294 | vmw_val_add_flag_none = 0, |
295 | vmw_val_add_flag_noctx = 1 << 0, |
296 | }; |
297 | |
298 | /** |
299 | * vmw_execbuf_res_val_add - Add a resource to the validation list. |
300 | * |
301 | * @sw_context: Pointer to the software context. |
302 | * @res: Unreferenced rcu-protected pointer to the resource. |
303 | * @dirty: Whether to change dirty status. |
* @flags: Specifies whether to use the context or not.
305 | * |
306 | * Returns: 0 on success. Negative error code on failure. Typical error codes |
307 | * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed. |
308 | */ |
309 | static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context, |
310 | struct vmw_resource *res, |
311 | u32 dirty, |
312 | u32 flags) |
313 | { |
314 | struct vmw_private *dev_priv = res->dev_priv; |
315 | int ret; |
316 | enum vmw_res_type res_type = vmw_res_type(res); |
317 | struct vmw_res_cache_entry *rcache; |
318 | struct vmw_ctx_validation_info *ctx_info; |
319 | bool first_usage; |
320 | unsigned int priv_size; |
321 | |
322 | rcache = &sw_context->res_cache[res_type]; |
323 | if (likely(rcache->valid && rcache->res == res)) { |
324 | if (dirty) |
vmw_validation_res_set_dirty(sw_context->ctx,
rcache->private, dirty);
327 | return 0; |
328 | } |
329 | |
330 | if ((flags & vmw_val_add_flag_noctx) != 0) { |
ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
(void **)&ctx_info, NULL);
333 | if (ret) |
334 | return ret; |
335 | |
336 | } else { |
337 | priv_size = vmw_execbuf_res_size(dev_priv, res_type); |
ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
dirty, (void **)&ctx_info,
&first_usage);
341 | if (ret) |
342 | return ret; |
343 | |
344 | if (priv_size && first_usage) { |
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
ctx_info);
347 | if (ret) { |
348 | VMW_DEBUG_USER("Failed first usage context setup.\n" ); |
349 | return ret; |
350 | } |
351 | } |
352 | } |
353 | |
vmw_execbuf_rcache_update(rcache, res, ctx_info);
355 | return 0; |
356 | } |
357 | |
358 | /** |
359 | * vmw_view_res_val_add - Add a view and the surface it's pointing to to the |
360 | * validation list |
361 | * |
362 | * @sw_context: The software context holding the validation list. |
363 | * @view: Pointer to the view resource. |
364 | * |
365 | * Returns 0 if success, negative error code otherwise. |
366 | */ |
367 | static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, |
368 | struct vmw_resource *view) |
369 | { |
370 | int ret; |
371 | |
372 | /* |
373 | * First add the resource the view is pointing to, otherwise it may be |
374 | * swapped out when the view is validated. |
375 | */ |
ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
vmw_view_dirtying(view), vmw_val_add_flag_noctx);
378 | if (ret) |
379 | return ret; |
380 | |
return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
vmw_val_add_flag_noctx);
383 | } |
384 | |
385 | /** |
386 | * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing |
387 | * to to the validation list. |
388 | * |
389 | * @sw_context: The software context holding the validation list. |
390 | * @view_type: The view type to look up. |
391 | * @id: view id of the view. |
392 | * |
393 | * The view is represented by a view id and the DX context it's created on, or |
394 | * scheduled for creation on. If there is no DX context set, the function will |
395 | * return an -EINVAL error pointer. |
396 | * |
397 | * Returns: Unreferenced pointer to the resource on success, negative error |
398 | * pointer on failure. |
399 | */ |
400 | static struct vmw_resource * |
401 | vmw_view_id_val_add(struct vmw_sw_context *sw_context, |
402 | enum vmw_view_type view_type, u32 id) |
403 | { |
404 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
405 | struct vmw_resource *view; |
406 | int ret; |
407 | |
408 | if (!ctx_node) |
return ERR_PTR(-EINVAL);
410 | |
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
413 | return view; |
414 | |
415 | ret = vmw_view_res_val_add(sw_context, view); |
416 | if (ret) |
return ERR_PTR(ret);
418 | |
419 | return view; |
420 | } |
421 | |
422 | /** |
423 | * vmw_resource_context_res_add - Put resources previously bound to a context on |
424 | * the validation list |
425 | * |
426 | * @dev_priv: Pointer to a device private structure |
427 | * @sw_context: Pointer to a software context used for this command submission |
428 | * @ctx: Pointer to the context resource |
429 | * |
430 | * This function puts all resources that were previously bound to @ctx on the |
431 | * resource validation list. This is part of the context state reemission |
432 | */ |
433 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, |
434 | struct vmw_sw_context *sw_context, |
435 | struct vmw_resource *ctx) |
436 | { |
437 | struct list_head *binding_list; |
438 | struct vmw_ctx_bindinfo *entry; |
439 | int ret = 0; |
440 | struct vmw_resource *res; |
441 | u32 i; |
u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
443 | SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX; |
444 | |
445 | /* Add all cotables to the validation list. */ |
446 | if (has_sm4_context(dev_priv) && |
vmw_res_type(ctx) == vmw_res_dx_context) {
448 | for (i = 0; i < cotable_max; ++i) { |
res = vmw_context_cotable(ctx, i);
if (IS_ERR_OR_NULL(res))
451 | continue; |
452 | |
ret = vmw_execbuf_res_val_add(sw_context, res,
VMW_RES_DIRTY_SET,
vmw_val_add_flag_noctx);
456 | if (unlikely(ret != 0)) |
457 | return ret; |
458 | } |
459 | } |
460 | |
461 | /* Add all resources bound to the context to the validation list */ |
462 | mutex_lock(&dev_priv->binding_mutex); |
463 | binding_list = vmw_context_binding_list(ctx); |
464 | |
465 | list_for_each_entry(entry, binding_list, ctx_list) { |
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
ret = vmw_execbuf_res_val_add(sw_context, entry->res,
vmw_binding_dirtying(entry->bt),
vmw_val_add_flag_noctx);
472 | if (unlikely(ret != 0)) |
473 | break; |
474 | } |
475 | |
476 | if (has_sm4_context(dev_priv) && |
vmw_res_type(ctx) == vmw_res_dx_context) {
478 | struct vmw_bo *dx_query_mob; |
479 | |
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
481 | if (dx_query_mob) { |
vmw_bo_placement_set(dx_query_mob,
VMW_BO_DOMAIN_MOB,
VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx,
dx_query_mob);
487 | } |
488 | } |
489 | |
mutex_unlock(&dev_priv->binding_mutex);
491 | return ret; |
492 | } |
493 | |
494 | /** |
495 | * vmw_resource_relocation_add - Add a relocation to the relocation list |
496 | * |
497 | * @sw_context: Pointer to the software context. |
498 | * @res: The resource. |
499 | * @offset: Offset into the command buffer currently being parsed where the id |
500 | * that needs fixup is located. Granularity is one byte. |
501 | * @rel_type: Relocation type. |
502 | */ |
503 | static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, |
504 | const struct vmw_resource *res, |
505 | unsigned long offset, |
506 | enum vmw_resource_relocation_type |
507 | rel_type) |
508 | { |
509 | struct vmw_resource_relocation *rel; |
510 | |
rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
512 | if (unlikely(!rel)) { |
513 | VMW_DEBUG_USER("Failed to allocate a resource relocation.\n" ); |
514 | return -ENOMEM; |
515 | } |
516 | |
517 | rel->res = res; |
518 | rel->offset = offset; |
519 | rel->rel_type = rel_type; |
list_add_tail(&rel->head, &sw_context->res_relocations);
521 | |
522 | return 0; |
523 | } |
524 | |
525 | /** |
526 | * vmw_resource_relocations_free - Free all relocations on a list |
527 | * |
528 | * @list: Pointer to the head of the relocation list |
529 | */ |
530 | static void vmw_resource_relocations_free(struct list_head *list) |
531 | { |
532 | /* Memory is validation context memory, so no need to free it */ |
533 | INIT_LIST_HEAD(list); |
534 | } |
535 | |
536 | /** |
537 | * vmw_resource_relocations_apply - Apply all relocations on a list |
538 | * |
* @cb: Pointer to the start of the command buffer being patched. This need not
* be the same buffer as the one being parsed when the relocation list was
* built,
540 | * the same buffer as the one being parsed when the relocation list was built, |
541 | * but the contents must be the same modulo the resource ids. |
542 | * @list: Pointer to the head of the relocation list. |
543 | */ |
544 | static void vmw_resource_relocations_apply(uint32_t *cb, |
545 | struct list_head *list) |
546 | { |
547 | struct vmw_resource_relocation *rel; |
548 | |
549 | /* Validate the struct vmw_resource_relocation member size */ |
550 | BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); |
551 | BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); |
552 | |
553 | list_for_each_entry(rel, list, head) { |
554 | u32 *addr = (u32 *)((unsigned long) cb + rel->offset); |
555 | switch (rel->rel_type) { |
556 | case vmw_res_rel_normal: |
557 | *addr = rel->res->id; |
558 | break; |
559 | case vmw_res_rel_nop: |
560 | *addr = SVGA_3D_CMD_NOP; |
561 | break; |
562 | default: |
563 | if (rel->res->id == -1) |
564 | *addr = SVGA_3D_CMD_NOP; |
565 | break; |
566 | } |
567 | } |
568 | } |
569 | |
570 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
571 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
573 | { |
574 | return -EINVAL; |
575 | } |
576 | |
577 | static int vmw_cmd_ok(struct vmw_private *dev_priv, |
578 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
580 | { |
581 | return 0; |
582 | } |
583 | |
584 | /** |
585 | * vmw_resources_reserve - Reserve all resources on the sw_context's resource |
586 | * list. |
587 | * |
588 | * @sw_context: Pointer to the software context. |
589 | * |
* Note that since VMware's command submission is currently protected by the
591 | * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since |
592 | * only a single thread at once will attempt this. |
593 | */ |
594 | static int vmw_resources_reserve(struct vmw_sw_context *sw_context) |
595 | { |
596 | int ret; |
597 | |
ret = vmw_validation_res_reserve(sw_context->ctx, true);
599 | if (ret) |
600 | return ret; |
601 | |
602 | if (sw_context->dx_query_mob) { |
603 | struct vmw_bo *expected_dx_query_mob; |
604 | |
605 | expected_dx_query_mob = |
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
607 | if (expected_dx_query_mob && |
608 | expected_dx_query_mob != sw_context->dx_query_mob) { |
609 | ret = -EINVAL; |
610 | } |
611 | } |
612 | |
613 | return ret; |
614 | } |
615 | |
616 | /** |
617 | * vmw_cmd_res_check - Check that a resource is present and if so, put it on the |
618 | * resource validate list unless it's already there. |
619 | * |
620 | * @dev_priv: Pointer to a device private structure. |
621 | * @sw_context: Pointer to the software context. |
622 | * @res_type: Resource type. |
623 | * @dirty: Whether to change dirty status. |
624 | * @converter: User-space visible type specific information. |
625 | * @id_loc: Pointer to the location in the command buffer currently being parsed |
626 | * from where the user-space resource id handle is located. |
627 | * @p_res: Pointer to pointer to resource validation node. Populated on |
628 | * exit. |
629 | */ |
630 | static int |
631 | vmw_cmd_res_check(struct vmw_private *dev_priv, |
632 | struct vmw_sw_context *sw_context, |
633 | enum vmw_res_type res_type, |
634 | u32 dirty, |
635 | const struct vmw_user_resource_conv *converter, |
636 | uint32_t *id_loc, |
637 | struct vmw_resource **p_res) |
638 | { |
639 | struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; |
640 | struct vmw_resource *res; |
641 | int ret = 0; |
642 | bool needs_unref = false; |
643 | |
644 | if (p_res) |
645 | *p_res = NULL; |
646 | |
647 | if (*id_loc == SVGA3D_INVALID_ID) { |
648 | if (res_type == vmw_res_context) { |
649 | VMW_DEBUG_USER("Illegal context invalid id.\n" ); |
650 | return -EINVAL; |
651 | } |
652 | return 0; |
653 | } |
654 | |
655 | if (likely(rcache->valid_handle && *id_loc == rcache->handle)) { |
656 | res = rcache->res; |
657 | if (dirty) |
vmw_validation_res_set_dirty(sw_context->ctx,
rcache->private, dirty);
660 | } else { |
661 | unsigned int size = vmw_execbuf_res_size(dev_priv, res_type); |
662 | |
ret = vmw_validation_preload_res(sw_context->ctx, size);
664 | if (ret) |
665 | return ret; |
666 | |
667 | ret = vmw_user_resource_lookup_handle |
(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
669 | if (ret != 0) { |
670 | VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n" , |
671 | (unsigned int) *id_loc); |
672 | return ret; |
673 | } |
674 | needs_unref = true; |
675 | |
ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
677 | if (unlikely(ret != 0)) |
678 | goto res_check_done; |
679 | |
680 | if (rcache->valid && rcache->res == res) { |
681 | rcache->valid_handle = true; |
682 | rcache->handle = *id_loc; |
683 | } |
684 | } |
685 | |
ret = vmw_resource_relocation_add(sw_context, res,
vmw_ptr_diff(sw_context->buf_start,
id_loc),
vmw_res_rel_normal);
690 | if (p_res) |
691 | *p_res = res; |
692 | |
693 | res_check_done: |
694 | if (needs_unref) |
vmw_resource_unreference(&res);
696 | |
697 | return ret; |
698 | } |
699 | |
700 | /** |
701 | * vmw_rebind_all_dx_query - Rebind DX query associated with the context |
702 | * |
703 | * @ctx_res: context the query belongs to |
704 | * |
705 | * This function assumes binding_mutex is held. |
706 | */ |
707 | static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) |
708 | { |
709 | struct vmw_private *dev_priv = ctx_res->dev_priv; |
710 | struct vmw_bo *dx_query_mob; |
711 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery); |
712 | |
713 | dx_query_mob = vmw_context_get_dx_query_mob(ctx_res); |
714 | |
715 | if (!dx_query_mob || dx_query_mob->dx_query_ctx) |
716 | return 0; |
717 | |
718 | cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id); |
719 | if (cmd == NULL) |
720 | return -ENOMEM; |
721 | |
722 | cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; |
723 | cmd->header.size = sizeof(cmd->body); |
724 | cmd->body.cid = ctx_res->id; |
725 | cmd->body.mobid = dx_query_mob->tbo.resource->start; |
vmw_cmd_commit(dev_priv, sizeof(*cmd));
727 | |
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
729 | |
730 | return 0; |
731 | } |
732 | |
733 | /** |
734 | * vmw_rebind_contexts - Rebind all resources previously bound to referenced |
735 | * contexts. |
736 | * |
737 | * @sw_context: Pointer to the software context. |
738 | * |
739 | * Rebind context binding points that have been scrubbed because of eviction. |
740 | */ |
741 | static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) |
742 | { |
743 | struct vmw_ctx_validation_info *val; |
744 | int ret; |
745 | |
746 | list_for_each_entry(val, &sw_context->ctx_list, head) { |
ret = vmw_binding_rebind_all(val->cur);
748 | if (unlikely(ret != 0)) { |
749 | if (ret != -ERESTARTSYS) |
750 | VMW_DEBUG_USER("Failed to rebind context.\n" ); |
751 | return ret; |
752 | } |
753 | |
ret = vmw_rebind_all_dx_query(val->ctx);
755 | if (ret != 0) { |
756 | VMW_DEBUG_USER("Failed to rebind queries.\n" ); |
757 | return ret; |
758 | } |
759 | } |
760 | |
761 | return 0; |
762 | } |
763 | |
764 | /** |
765 | * vmw_view_bindings_add - Add an array of view bindings to a context binding |
766 | * state tracker. |
767 | * |
768 | * @sw_context: The execbuf state used for this command. |
769 | * @view_type: View type for the bindings. |
770 | * @binding_type: Binding type for the bindings. |
* @shader_slot: The shader slot to use for the bindings.
772 | * @view_ids: Array of view ids to be bound. |
773 | * @num_views: Number of view ids in @view_ids. |
774 | * @first_slot: The binding slot to be used for the first view id in @view_ids. |
775 | */ |
776 | static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, |
777 | enum vmw_view_type view_type, |
778 | enum vmw_ctx_binding_type binding_type, |
779 | uint32 shader_slot, |
780 | uint32 view_ids[], u32 num_views, |
781 | u32 first_slot) |
782 | { |
783 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
784 | u32 i; |
785 | |
786 | if (!ctx_node) |
787 | return -EINVAL; |
788 | |
789 | for (i = 0; i < num_views; ++i) { |
790 | struct vmw_ctx_bindinfo_view binding; |
791 | struct vmw_resource *view = NULL; |
792 | |
793 | if (view_ids[i] != SVGA3D_INVALID_ID) { |
view = vmw_view_id_val_add(sw_context, view_type,
view_ids[i]);
if (IS_ERR(view)) {
VMW_DEBUG_USER("View not found.\n");
return PTR_ERR(view);
799 | } |
800 | } |
801 | binding.bi.ctx = ctx_node->ctx; |
802 | binding.bi.res = view; |
803 | binding.bi.bt = binding_type; |
804 | binding.shader_slot = shader_slot; |
805 | binding.slot = first_slot + i; |
vmw_binding_add(ctx_node->staged, &binding.bi,
shader_slot, binding.slot);
808 | } |
809 | |
810 | return 0; |
811 | } |
812 | |
813 | /** |
814 | * vmw_cmd_cid_check - Check a command header for valid context information. |
815 | * |
816 | * @dev_priv: Pointer to a device private structure. |
817 | * @sw_context: Pointer to the software context. |
818 | * @header: A command header with an embedded user-space context handle. |
819 | * |
820 | * Convenience function: Call vmw_cmd_res_check with the user-space context |
821 | * handle embedded in @header. |
822 | */ |
823 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, |
824 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
826 | { |
827 | VMW_DECLARE_CMD_VAR(*cmd, uint32_t) = |
828 | container_of(header, typeof(*cmd), header); |
829 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body, NULL);
833 | } |
834 | |
835 | /** |
836 | * vmw_execbuf_info_from_res - Get the private validation metadata for a |
837 | * recently validated resource |
838 | * |
839 | * @sw_context: Pointer to the command submission context |
840 | * @res: The resource |
841 | * |
842 | * The resource pointed to by @res needs to be present in the command submission |
843 | * context's resource cache and hence the last resource of that type to be |
844 | * processed by the validation code. |
845 | * |
846 | * Return: a pointer to the private metadata of the resource, or NULL if it |
847 | * wasn't found |
848 | */ |
849 | static struct vmw_ctx_validation_info * |
850 | vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, |
851 | struct vmw_resource *res) |
852 | { |
853 | struct vmw_res_cache_entry *rcache = |
854 | &sw_context->res_cache[vmw_res_type(res)]; |
855 | |
856 | if (rcache->valid && rcache->res == res) |
857 | return rcache->private; |
858 | |
859 | WARN_ON_ONCE(true); |
860 | return NULL; |
861 | } |
862 | |
863 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, |
864 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
866 | { |
867 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget); |
868 | struct vmw_resource *ctx; |
869 | struct vmw_resource *res; |
870 | int ret; |
871 | |
872 | cmd = container_of(header, typeof(*cmd), header); |
873 | |
874 | if (cmd->body.type >= SVGA3D_RT_MAX) { |
875 | VMW_DEBUG_USER("Illegal render target type %u.\n" , |
876 | (unsigned int) cmd->body.type); |
877 | return -EINVAL; |
878 | } |
879 | |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body.cid, &ctx);
883 | if (unlikely(ret != 0)) |
884 | return ret; |
885 | |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.target.sid, &res);
889 | if (unlikely(ret)) |
890 | return ret; |
891 | |
892 | if (dev_priv->has_mob) { |
893 | struct vmw_ctx_bindinfo_view binding; |
894 | struct vmw_ctx_validation_info *node; |
895 | |
node = vmw_execbuf_info_from_res(sw_context, ctx);
897 | if (!node) |
898 | return -EINVAL; |
899 | |
900 | binding.bi.ctx = ctx; |
901 | binding.bi.res = res; |
902 | binding.bi.bt = vmw_ctx_binding_rt; |
903 | binding.slot = cmd->body.type; |
vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
905 | } |
906 | |
907 | return 0; |
908 | } |
909 | |
910 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
911 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
913 | { |
914 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy); |
915 | int ret; |
916 | |
917 | cmd = container_of(header, typeof(*cmd), header); |
918 | |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src.sid, NULL);
922 | if (ret) |
923 | return ret; |
924 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
928 | } |
929 | |
930 | static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv, |
931 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
933 | { |
934 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy); |
935 | int ret; |
936 | |
937 | cmd = container_of(header, typeof(*cmd), header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src, NULL);
941 | if (ret != 0) |
942 | return ret; |
943 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest, NULL);
947 | } |
948 | |
949 | static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv, |
950 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
952 | { |
953 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion); |
954 | int ret; |
955 | |
956 | cmd = container_of(header, typeof(*cmd), header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
960 | if (ret != 0) |
961 | return ret; |
962 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dstSid, NULL);
966 | } |
967 | |
968 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
969 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
971 | { |
972 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt); |
973 | int ret; |
974 | |
975 | cmd = container_of(header, typeof(*cmd), header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src.sid, NULL);
979 | if (unlikely(ret != 0)) |
980 | return ret; |
981 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
985 | } |
986 | |
987 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
988 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
990 | { |
991 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) = |
992 | container_of(header, typeof(*cmd), header); |
993 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcImage.sid, NULL);
997 | } |
998 | |
999 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
1000 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1002 | { |
1003 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) = |
1004 | container_of(header, typeof(*cmd), header); |
1005 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, NULL);
1009 | } |
1010 | |
1011 | /** |
1012 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
1013 | * |
1014 | * @dev_priv: The device private structure. |
1015 | * @new_query_bo: The new buffer holding query results. |
1016 | * @sw_context: The software context used for this command submission. |
1017 | * |
1018 | * This function checks whether @new_query_bo is suitable for holding query |
1019 | * results, and if another buffer currently is pinned for query results. If so, |
1020 | * the function prepares the state of @sw_context for switching pinned buffers |
1021 | * after successful submission of the current command batch. |
1022 | */ |
1023 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
1024 | struct vmw_bo *new_query_bo, |
1025 | struct vmw_sw_context *sw_context) |
1026 | { |
1027 | struct vmw_res_cache_entry *ctx_entry = |
1028 | &sw_context->res_cache[vmw_res_context]; |
1029 | int ret; |
1030 | |
1031 | BUG_ON(!ctx_entry->valid); |
1032 | sw_context->last_query_ctx = ctx_entry->res; |
1033 | |
1034 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
1035 | |
1036 | if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) { |
1037 | VMW_DEBUG_USER("Query buffer too large.\n" ); |
1038 | return -EINVAL; |
1039 | } |
1040 | |
1041 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
1042 | sw_context->needs_post_query_barrier = true; |
vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
sw_context->cur_query_bo);
1046 | if (unlikely(ret != 0)) |
1047 | return ret; |
1048 | } |
1049 | sw_context->cur_query_bo = new_query_bo; |
1050 | |
vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
dev_priv->dummy_query_bo);
1054 | if (unlikely(ret != 0)) |
1055 | return ret; |
1056 | } |
1057 | |
1058 | return 0; |
1059 | } |
1060 | |
1061 | /** |
1062 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer |
1063 | * |
1064 | * @dev_priv: The device private structure. |
1065 | * @sw_context: The software context used for this command submission batch. |
1066 | * |
1067 | * This function will check if we're switching query buffers, and will then, |
1068 | * issue a dummy occlusion query wait used as a query barrier. When the fence |
1069 | * object following that query wait has signaled, we are sure that all preceding |
1070 | * queries have finished, and the old query buffer can be unpinned. However, |
1071 | * since both the new query buffer and the old one are fenced with that fence, |
* we can do an asynchronous unpin now, and be sure that the old query buffer
1073 | * won't be moved until the fence has signaled. |
1074 | * |
* As mentioned above, both the new and old query buffers need to be fenced
1076 | * using a sequence emitted *after* calling this function. |
1077 | */ |
1078 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, |
1079 | struct vmw_sw_context *sw_context) |
1080 | { |
1081 | /* |
1082 | * The validate list should still hold references to all |
1083 | * contexts here. |
1084 | */ |
1085 | if (sw_context->needs_post_query_barrier) { |
1086 | struct vmw_res_cache_entry *ctx_entry = |
1087 | &sw_context->res_cache[vmw_res_context]; |
1088 | struct vmw_resource *ctx; |
1089 | int ret; |
1090 | |
1091 | BUG_ON(!ctx_entry->valid); |
1092 | ctx = ctx_entry->res; |
1093 | |
ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1095 | |
1096 | if (unlikely(ret != 0)) |
1097 | VMW_DEBUG_USER("Out of fifo space for dummy query.\n" ); |
1098 | } |
1099 | |
1100 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { |
1101 | if (dev_priv->pinned_bo) { |
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
vmw_bo_unreference(&dev_priv->pinned_bo);
1104 | } |
1105 | |
1106 | if (!sw_context->needs_post_query_barrier) { |
vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1108 | |
1109 | /* |
1110 | * We pin also the dummy_query_bo buffer so that we |
1111 | * don't need to validate it when emitting dummy queries |
1112 | * in context destroy paths. |
1113 | */ |
1114 | if (!dev_priv->dummy_query_bo_pinned) { |
vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
true);
1117 | dev_priv->dummy_query_bo_pinned = true; |
1118 | } |
1119 | |
1120 | BUG_ON(sw_context->last_query_ctx == NULL); |
1121 | dev_priv->query_cid = sw_context->last_query_ctx->id; |
1122 | dev_priv->query_cid_valid = true; |
1123 | dev_priv->pinned_bo = |
vmw_bo_reference(sw_context->cur_query_bo);
1125 | } |
1126 | } |
1127 | } |
1128 | |
1129 | /** |
1130 | * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle |
1131 | * to a MOB id. |
1132 | * |
1133 | * @dev_priv: Pointer to a device private structure. |
1134 | * @sw_context: The software context used for this command batch validation. |
1135 | * @id: Pointer to the user-space handle to be translated. |
1136 | * @vmw_bo_p: Points to a location that, on successful return will carry a |
1137 | * non-reference-counted pointer to the buffer object identified by the |
1138 | * user-space handle in @id. |
1139 | * |
1140 | * This function saves information needed to translate a user-space buffer |
1141 | * handle to a MOB id. The translation does not take place immediately, but |
1142 | * during a call to vmw_apply_relocations(). |
1143 | * |
1144 | * This function builds a relocation list and a list of buffers to validate. The |
1145 | * former needs to be freed using either vmw_apply_relocations() or |
1146 | * vmw_free_relocations(). The latter needs to be freed using |
1147 | * vmw_clear_validations. |
1148 | */ |
1149 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, |
1150 | struct vmw_sw_context *sw_context, |
1151 | SVGAMobId *id, |
1152 | struct vmw_bo **vmw_bo_p) |
1153 | { |
1154 | struct vmw_bo *vmw_bo, *tmp_bo; |
1155 | uint32_t handle = *id; |
1156 | struct vmw_relocation *reloc; |
1157 | int ret; |
1158 | |
vmw_validation_preload_bo(sw_context->ctx);
ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
if (ret != 0) {
drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
return ret;
}
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
tmp_bo = vmw_bo;
vmw_user_bo_unref(&tmp_bo);
1169 | if (unlikely(ret != 0)) |
1170 | return ret; |
1171 | |
reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1173 | if (!reloc) |
1174 | return -ENOMEM; |
1175 | |
1176 | reloc->mob_loc = id; |
1177 | reloc->vbo = vmw_bo; |
1178 | |
1179 | *vmw_bo_p = vmw_bo; |
list_add_tail(&reloc->head, &sw_context->bo_relocations);
1181 | |
1182 | return 0; |
1183 | } |
1184 | |
1185 | /** |
1186 | * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle |
1187 | * to a valid SVGAGuestPtr |
1188 | * |
1189 | * @dev_priv: Pointer to a device private structure. |
1190 | * @sw_context: The software context used for this command batch validation. |
1191 | * @ptr: Pointer to the user-space handle to be translated. |
1192 | * @vmw_bo_p: Points to a location that, on successful return will carry a |
1193 | * non-reference-counted pointer to the DMA buffer identified by the user-space |
* handle in @ptr.
1195 | * |
1196 | * This function saves information needed to translate a user-space buffer |
1197 | * handle to a valid SVGAGuestPtr. The translation does not take place |
1198 | * immediately, but during a call to vmw_apply_relocations(). |
1199 | * |
1200 | * This function builds a relocation list and a list of buffers to validate. |
1201 | * The former needs to be freed using either vmw_apply_relocations() or |
1202 | * vmw_free_relocations(). The latter needs to be freed using |
1203 | * vmw_clear_validations. |
1204 | */ |
1205 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
1206 | struct vmw_sw_context *sw_context, |
1207 | SVGAGuestPtr *ptr, |
1208 | struct vmw_bo **vmw_bo_p) |
1209 | { |
1210 | struct vmw_bo *vmw_bo, *tmp_bo; |
1211 | uint32_t handle = ptr->gmrId; |
1212 | struct vmw_relocation *reloc; |
1213 | int ret; |
1214 | |
vmw_validation_preload_bo(sw_context->ctx);
ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
if (ret != 0) {
drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
return ret;
}
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
tmp_bo = vmw_bo;
vmw_user_bo_unref(&tmp_bo);
1226 | if (unlikely(ret != 0)) |
1227 | return ret; |
1228 | |
reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1230 | if (!reloc) |
1231 | return -ENOMEM; |
1232 | |
1233 | reloc->location = ptr; |
1234 | reloc->vbo = vmw_bo; |
1235 | *vmw_bo_p = vmw_bo; |
list_add_tail(&reloc->head, &sw_context->bo_relocations);
1237 | |
1238 | return 0; |
1239 | } |
1240 | |
1241 | /** |
1242 | * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command. |
1243 | * |
1244 | * @dev_priv: Pointer to a device private struct. |
1245 | * @sw_context: The software context used for this command submission. |
1246 | * @header: Pointer to the command header in the command stream. |
1247 | * |
1248 | * This function adds the new query into the query COTABLE |
1249 | */ |
1250 | static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, |
1251 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1253 | { |
1254 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery); |
1255 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
1256 | struct vmw_resource *cotable_res; |
1257 | int ret; |
1258 | |
1259 | if (!ctx_node) |
1260 | return -EINVAL; |
1261 | |
1262 | cmd = container_of(header, typeof(*cmd), header); |
1263 | |
1264 | if (cmd->body.type < SVGA3D_QUERYTYPE_MIN || |
1265 | cmd->body.type >= SVGA3D_QUERYTYPE_MAX) |
1266 | return -EINVAL; |
1267 | |
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
if (IS_ERR_OR_NULL(cotable_res))
return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1272 | |
1273 | return ret; |
1274 | } |
1275 | |
1276 | /** |
1277 | * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command. |
1278 | * |
1279 | * @dev_priv: Pointer to a device private struct. |
1280 | * @sw_context: The software context used for this command submission. |
1281 | * @header: Pointer to the command header in the command stream. |
1282 | * |
1283 | * The query bind operation will eventually associate the query ID with its |
1284 | * backing MOB. In this function, we take the user mode MOB ID and use |
1285 | * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent. |
1286 | */ |
1287 | static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, |
1288 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1290 | { |
1291 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery); |
1292 | struct vmw_bo *vmw_bo; |
1293 | int ret; |
1294 | |
1295 | cmd = container_of(header, typeof(*cmd), header); |
1296 | |
1297 | /* |
1298 | * Look up the buffer pointed to by q.mobid, put it on the relocation |
1299 | * list so its kernel mode MOB ID can be filled in later |
1300 | */ |
ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
1303 | |
1304 | if (ret != 0) |
1305 | return ret; |
1306 | |
1307 | sw_context->dx_query_mob = vmw_bo; |
1308 | sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx; |
1309 | return 0; |
1310 | } |
1311 | |
1312 | /** |
1313 | * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command. |
1314 | * |
1315 | * @dev_priv: Pointer to a device private struct. |
1316 | * @sw_context: The software context used for this command submission. |
1317 | * @header: Pointer to the command header in the command stream. |
1318 | */ |
1319 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, |
1320 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1322 | { |
1323 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) = |
1324 | container_of(header, typeof(*cmd), header); |
1325 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body.cid, NULL);
1329 | } |
1330 | |
1331 | /** |
1332 | * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command. |
1333 | * |
1334 | * @dev_priv: Pointer to a device private struct. |
1335 | * @sw_context: The software context used for this command submission. |
1336 | * @header: Pointer to the command header in the command stream. |
1337 | */ |
1338 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, |
1339 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1341 | { |
1342 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) = |
1343 | container_of(header, typeof(*cmd), header); |
1344 | |
1345 | if (unlikely(dev_priv->has_mob)) { |
1346 | VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery); |
1347 | |
1348 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1349 | |
1350 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; |
1351 | gb_cmd.header.size = cmd->header.size; |
1352 | gb_cmd.body.cid = cmd->body.cid; |
1353 | gb_cmd.body.type = cmd->body.type; |
1354 | |
1355 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1356 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); |
1357 | } |
1358 | |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body.cid, NULL);
1362 | } |
1363 | |
1364 | /** |
1365 | * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command. |
1366 | * |
1367 | * @dev_priv: Pointer to a device private struct. |
1368 | * @sw_context: The software context used for this command submission. |
1369 | * @header: Pointer to the command header in the command stream. |
1370 | */ |
1371 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, |
1372 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1374 | { |
1375 | struct vmw_bo *vmw_bo; |
1376 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery); |
1377 | int ret; |
1378 | |
1379 | cmd = container_of(header, typeof(*cmd), header); |
1380 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1381 | if (unlikely(ret != 0)) |
1382 | return ret; |
1383 | |
ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
1386 | if (unlikely(ret != 0)) |
1387 | return ret; |
1388 | |
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1390 | |
1391 | return ret; |
1392 | } |
1393 | |
1394 | /** |
1395 | * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command. |
1396 | * |
1397 | * @dev_priv: Pointer to a device private struct. |
1398 | * @sw_context: The software context used for this command submission. |
1399 | * @header: Pointer to the command header in the command stream. |
1400 | */ |
1401 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, |
1402 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1404 | { |
1405 | struct vmw_bo *vmw_bo; |
1406 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery); |
1407 | int ret; |
1408 | |
1409 | cmd = container_of(header, typeof(*cmd), header); |
1410 | if (dev_priv->has_mob) { |
1411 | VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery); |
1412 | |
1413 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1414 | |
1415 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; |
1416 | gb_cmd.header.size = cmd->header.size; |
1417 | gb_cmd.body.cid = cmd->body.cid; |
1418 | gb_cmd.body.type = cmd->body.type; |
1419 | gb_cmd.body.mobid = cmd->body.guestResult.gmrId; |
1420 | gb_cmd.body.offset = cmd->body.guestResult.offset; |
1421 | |
1422 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1423 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); |
1424 | } |
1425 | |
1426 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1427 | if (unlikely(ret != 0)) |
1428 | return ret; |
1429 | |
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.guestResult, &vmw_bo);
1432 | if (unlikely(ret != 0)) |
1433 | return ret; |
1434 | |
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1436 | |
1437 | return ret; |
1438 | } |
1439 | |
1440 | /** |
1441 | * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command. |
1442 | * |
1443 | * @dev_priv: Pointer to a device private struct. |
1444 | * @sw_context: The software context used for this command submission. |
1445 | * @header: Pointer to the command header in the command stream. |
1446 | */ |
1447 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, |
1448 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1450 | { |
1451 | struct vmw_bo *vmw_bo; |
1452 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery); |
1453 | int ret; |
1454 | |
1455 | cmd = container_of(header, typeof(*cmd), header); |
1456 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1457 | if (unlikely(ret != 0)) |
1458 | return ret; |
1459 | |
ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
1462 | if (unlikely(ret != 0)) |
1463 | return ret; |
1464 | |
1465 | return 0; |
1466 | } |
1467 | |
1468 | /** |
1469 | * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command. |
1470 | * |
1471 | * @dev_priv: Pointer to a device private struct. |
1472 | * @sw_context: The software context used for this command submission. |
1473 | * @header: Pointer to the command header in the command stream. |
1474 | */ |
1475 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
1476 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1478 | { |
1479 | struct vmw_bo *vmw_bo; |
1480 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery); |
1481 | int ret; |
1482 | |
1483 | cmd = container_of(header, typeof(*cmd), header); |
1484 | if (dev_priv->has_mob) { |
1485 | VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery); |
1486 | |
1487 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1488 | |
1489 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; |
1490 | gb_cmd.header.size = cmd->header.size; |
1491 | gb_cmd.body.cid = cmd->body.cid; |
1492 | gb_cmd.body.type = cmd->body.type; |
1493 | gb_cmd.body.mobid = cmd->body.guestResult.gmrId; |
1494 | gb_cmd.body.offset = cmd->body.guestResult.offset; |
1495 | |
1496 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1497 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); |
1498 | } |
1499 | |
1500 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1501 | if (unlikely(ret != 0)) |
1502 | return ret; |
1503 | |
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.guestResult, &vmw_bo);
1506 | if (unlikely(ret != 0)) |
1507 | return ret; |
1508 | |
1509 | return 0; |
1510 | } |
1511 | |
1512 | static int vmw_cmd_dma(struct vmw_private *dev_priv, |
1513 | struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header)
1515 | { |
1516 | struct vmw_bo *vmw_bo = NULL; |
1517 | struct vmw_surface *srf = NULL; |
1518 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); |
1519 | int ret; |
1520 | SVGA3dCmdSurfaceDMASuffix *suffix; |
1521 | uint32_t bo_size; |
1522 | bool dirty; |
1523 | |
1524 | cmd = container_of(header, typeof(*cmd), header); |
1525 | suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body + |
1526 | header->size - sizeof(*suffix)); |
1527 | |
/* Make sure device and verifier stay in sync. */
1529 | if (unlikely(suffix->suffixSize != sizeof(*suffix))) { |
1530 | VMW_DEBUG_USER("Invalid DMA suffix size.\n" ); |
1531 | return -EINVAL; |
1532 | } |
1533 | |
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
1536 | if (unlikely(ret != 0)) |
1537 | return ret; |
1538 | |
1539 | /* Make sure DMA doesn't cross BO boundaries. */ |
1540 | bo_size = vmw_bo->tbo.base.size; |
1541 | if (unlikely(cmd->body.guest.ptr.offset > bo_size)) { |
1542 | VMW_DEBUG_USER("Invalid DMA offset.\n" ); |
1543 | return -EINVAL; |
1544 | } |
1545 | |
1546 | bo_size -= cmd->body.guest.ptr.offset; |
1547 | if (unlikely(suffix->maximumOffset > bo_size)) |
1548 | suffix->maximumOffset = bo_size; |
1549 | |
1550 | dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ? |
1551 | VMW_RES_DIRTY_SET : 0; |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
1555 | if (unlikely(ret != 0)) { |
1556 | if (unlikely(ret != -ERESTARTSYS)) |
1557 | VMW_DEBUG_USER("could not find surface for DMA.\n" ); |
1558 | return ret; |
1559 | } |
1560 | |
	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
1564 | |
1565 | return 0; |
1566 | } |
1567 | |
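/**
 * vmw_cmd_draw - Validate SVGA_3D_CMD_DRAW_PRIMITIVES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */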
1568 | static int vmw_cmd_draw(struct vmw_private *dev_priv, |
1569 | struct vmw_sw_context *sw_context, |
			SVGA3dCmdHeader *header)
1571 | { |
1572 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives); |
1573 | SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( |
1574 | (unsigned long)header + sizeof(*cmd)); |
1575 | SVGA3dPrimitiveRange *range; |
1576 | uint32_t i; |
1577 | uint32_t maxnum; |
1578 | int ret; |
1579 | |
1580 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1581 | if (unlikely(ret != 0)) |
1582 | return ret; |
1583 | |
1584 | cmd = container_of(header, typeof(*cmd), header); |
1585 | maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); |
1586 | |
1587 | if (unlikely(cmd->body.numVertexDecls > maxnum)) { |
1588 | VMW_DEBUG_USER("Illegal number of vertex declarations.\n" ); |
1589 | return -EINVAL; |
1590 | } |
1591 | |
1592 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { |
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
1597 | if (unlikely(ret != 0)) |
1598 | return ret; |
1599 | } |
1600 | |
1601 | maxnum = (header->size - sizeof(cmd->body) - |
1602 | cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); |
1603 | if (unlikely(cmd->body.numRanges > maxnum)) { |
1604 | VMW_DEBUG_USER("Illegal number of index ranges.\n" ); |
1605 | return -EINVAL; |
1606 | } |
1607 | |
1608 | range = (SVGA3dPrimitiveRange *) decl; |
1609 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { |
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
1614 | if (unlikely(ret != 0)) |
1615 | return ret; |
1616 | } |
1617 | return 0; |
1618 | } |
1619 | |
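/**
 * vmw_cmd_tex_state - Validate SVGA_3D_CMD_SETTEXTURESTATE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */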
1620 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, |
1621 | struct vmw_sw_context *sw_context, |
			     SVGA3dCmdHeader *header)
1623 | { |
1624 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState); |
1625 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
1626 | ((unsigned long) header + header->size + sizeof(*header)); |
1627 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
1628 | ((unsigned long) header + sizeof(*cmd)); |
1629 | struct vmw_resource *ctx; |
1630 | struct vmw_resource *res; |
1631 | int ret; |
1632 | |
1633 | cmd = container_of(header, typeof(*cmd), header); |
1634 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
1638 | if (unlikely(ret != 0)) |
1639 | return ret; |
1640 | |
1641 | for (; cur_state < last_state; ++cur_state) { |
1642 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
1643 | continue; |
1644 | |
1645 | if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { |
1646 | VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n" , |
1647 | (unsigned int) cur_state->stage); |
1648 | return -EINVAL; |
1649 | } |
1650 | |
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
1655 | if (unlikely(ret != 0)) |
1656 | return ret; |
1657 | |
1658 | if (dev_priv->has_mob) { |
1659 | struct vmw_ctx_bindinfo_tex binding; |
1660 | struct vmw_ctx_validation_info *node; |
1661 | |
			node = vmw_execbuf_info_from_res(sw_context, ctx);
1663 | if (!node) |
1664 | return -EINVAL; |
1665 | |
1666 | binding.bi.ctx = ctx; |
1667 | binding.bi.res = res; |
1668 | binding.bi.bt = vmw_ctx_binding_tex; |
1669 | binding.texture_stage = cur_state->stage; |
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
1672 | } |
1673 | } |
1674 | |
1675 | return 0; |
1676 | } |
1677 | |
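/**
 * vmw_cmd_check_define_gmrfb - Validate SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 */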
1678 | static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, |
1679 | struct vmw_sw_context *sw_context, |
1680 | void *buf) |
1681 | { |
1682 | struct vmw_bo *vmw_bo; |
1683 | |
1684 | struct { |
		uint32_t header;
1686 | SVGAFifoCmdDefineGMRFB body; |
1687 | } *cmd = buf; |
1688 | |
	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
1691 | } |
1692 | |
1693 | /** |
1694 | * vmw_cmd_res_switch_backup - Utility function to handle backup buffer |
1695 | * switching |
1696 | * |
1697 | * @dev_priv: Pointer to a device private struct. |
1698 | * @sw_context: The software context being used for this batch. |
1699 | * @res: Pointer to the resource. |
1700 | * @buf_id: Pointer to the user-space backup buffer handle in the command |
1701 | * stream. |
1702 | * @backup_offset: Offset of backup into MOB. |
1703 | * |
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
1707 | */ |
1708 | static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, |
1709 | struct vmw_sw_context *sw_context, |
1710 | struct vmw_resource *res, uint32_t *buf_id, |
1711 | unsigned long backup_offset) |
1712 | { |
1713 | struct vmw_bo *vbo; |
1714 | void *info; |
1715 | int ret; |
1716 | |
1717 | info = vmw_execbuf_info_from_res(sw_context, res); |
1718 | if (!info) |
1719 | return -EINVAL; |
1720 | |
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1722 | if (ret) |
1723 | return ret; |
1724 | |
	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
1727 | return 0; |
1728 | } |
1729 | |
1730 | /** |
1731 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching |
1732 | * |
1733 | * @dev_priv: Pointer to a device private struct. |
1734 | * @sw_context: The software context being used for this batch. |
1735 | * @res_type: The resource type. |
1736 | * @converter: Information about user-space binding for this resource type. |
1737 | * @res_id: Pointer to the user-space resource handle in the command stream. |
1738 | * @buf_id: Pointer to the user-space backup buffer handle in the command |
1739 | * stream. |
1740 | * @backup_offset: Offset of backup into MOB. |
1741 | * |
1742 | * This function prepares for registering a switch of backup buffers in the |
1743 | * resource metadata just prior to unreserving. It's basically a wrapper around |
1744 | * vmw_cmd_res_switch_backup with a different interface. |
1745 | */ |
1746 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, |
1747 | struct vmw_sw_context *sw_context, |
1748 | enum vmw_res_type res_type, |
1749 | const struct vmw_user_resource_conv |
1750 | *converter, uint32_t *res_id, uint32_t *buf_id, |
1751 | unsigned long backup_offset) |
1752 | { |
1753 | struct vmw_resource *res; |
1754 | int ret; |
1755 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1758 | if (ret) |
1759 | return ret; |
1760 | |
1761 | return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id, |
1762 | backup_offset); |
1763 | } |
1764 | |
1765 | /** |
1766 | * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command |
1767 | * |
1768 | * @dev_priv: Pointer to a device private struct. |
1769 | * @sw_context: The software context being used for this batch. |
1770 | * @header: Pointer to the command header in the command stream. |
1771 | */ |
1772 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, |
1773 | struct vmw_sw_context *sw_context, |
				   SVGA3dCmdHeader *header)
1775 | { |
1776 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) = |
1777 | container_of(header, typeof(*cmd), header); |
1778 | |
	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
1782 | } |
1783 | |
1784 | /** |
1785 | * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command |
1786 | * |
1787 | * @dev_priv: Pointer to a device private struct. |
1788 | * @sw_context: The software context being used for this batch. |
1789 | * @header: Pointer to the command header in the command stream. |
1790 | */ |
1791 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, |
1792 | struct vmw_sw_context *sw_context, |
				   SVGA3dCmdHeader *header)
1794 | { |
1795 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) = |
1796 | container_of(header, typeof(*cmd), header); |
1797 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
1801 | } |
1802 | |
1803 | /** |
1804 | * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command |
1805 | * |
1806 | * @dev_priv: Pointer to a device private struct. |
1807 | * @sw_context: The software context being used for this batch. |
1808 | * @header: Pointer to the command header in the command stream. |
1809 | */ |
1810 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, |
1811 | struct vmw_sw_context *sw_context, |
				     SVGA3dCmdHeader *header)
1813 | { |
1814 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) = |
1815 | container_of(header, typeof(*cmd), header); |
1816 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
1820 | } |
1821 | |
1822 | /** |
1823 | * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command |
1824 | * |
1825 | * @dev_priv: Pointer to a device private struct. |
1826 | * @sw_context: The software context being used for this batch. |
1827 | * @header: Pointer to the command header in the command stream. |
1828 | */ |
1829 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, |
1830 | struct vmw_sw_context *sw_context, |
				     SVGA3dCmdHeader *header)
1832 | { |
1833 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) = |
1834 | container_of(header, typeof(*cmd), header); |
1835 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
1839 | } |
1840 | |
1841 | /** |
1842 | * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE |
1843 | * command |
1844 | * |
1845 | * @dev_priv: Pointer to a device private struct. |
1846 | * @sw_context: The software context being used for this batch. |
1847 | * @header: Pointer to the command header in the command stream. |
1848 | */ |
1849 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, |
1850 | struct vmw_sw_context *sw_context, |
				       SVGA3dCmdHeader *header)
1852 | { |
1853 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) = |
1854 | container_of(header, typeof(*cmd), header); |
1855 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
1859 | } |
1860 | |
1861 | /** |
1862 | * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE |
1863 | * command |
1864 | * |
1865 | * @dev_priv: Pointer to a device private struct. |
1866 | * @sw_context: The software context being used for this batch. |
1867 | * @header: Pointer to the command header in the command stream. |
1868 | */ |
1869 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, |
1870 | struct vmw_sw_context *sw_context, |
				       SVGA3dCmdHeader *header)
1872 | { |
1873 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) = |
1874 | container_of(header, typeof(*cmd), header); |
1875 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
1879 | } |
1880 | |
1881 | /** |
1882 | * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE |
1883 | * command |
1884 | * |
1885 | * @dev_priv: Pointer to a device private struct. |
1886 | * @sw_context: The software context being used for this batch. |
1887 | * @header: Pointer to the command header in the command stream. |
1888 | */ |
1889 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, |
1890 | struct vmw_sw_context *sw_context, |
					 SVGA3dCmdHeader *header)
1892 | { |
1893 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) = |
1894 | container_of(header, typeof(*cmd), header); |
1895 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
1899 | } |
1900 | |
1901 | /** |
1902 | * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command |
1903 | * |
1904 | * @dev_priv: Pointer to a device private struct. |
1905 | * @sw_context: The software context being used for this batch. |
1906 | * @header: Pointer to the command header in the command stream. |
1907 | */ |
1908 | static int vmw_cmd_shader_define(struct vmw_private *dev_priv, |
1909 | struct vmw_sw_context *sw_context, |
				 SVGA3dCmdHeader *header)
1911 | { |
1912 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader); |
1913 | int ret; |
1914 | size_t size; |
1915 | struct vmw_resource *ctx; |
1916 | |
1917 | cmd = container_of(header, typeof(*cmd), header); |
1918 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
1922 | if (unlikely(ret != 0)) |
1923 | return ret; |
1924 | |
1925 | if (unlikely(!dev_priv->has_mob)) |
1926 | return 0; |
1927 | |
1928 | size = cmd->header.size - sizeof(cmd->body); |
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
1932 | if (unlikely(ret != 0)) |
1933 | return ret; |
1934 | |
	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
1939 | } |
1940 | |
1941 | /** |
1942 | * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command |
1943 | * |
1944 | * @dev_priv: Pointer to a device private struct. |
1945 | * @sw_context: The software context being used for this batch. |
1946 | * @header: Pointer to the command header in the command stream. |
1947 | */ |
1948 | static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, |
1949 | struct vmw_sw_context *sw_context, |
				  SVGA3dCmdHeader *header)
1951 | { |
1952 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader); |
1953 | int ret; |
1954 | struct vmw_resource *ctx; |
1955 | |
1956 | cmd = container_of(header, typeof(*cmd), header); |
1957 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
1961 | if (unlikely(ret != 0)) |
1962 | return ret; |
1963 | |
1964 | if (unlikely(!dev_priv->has_mob)) |
1965 | return 0; |
1966 | |
	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
1969 | if (unlikely(ret != 0)) |
1970 | return ret; |
1971 | |
	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
1976 | } |
1977 | |
1978 | /** |
1979 | * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command |
1980 | * |
1981 | * @dev_priv: Pointer to a device private struct. |
1982 | * @sw_context: The software context being used for this batch. |
1983 | * @header: Pointer to the command header in the command stream. |
1984 | */ |
1985 | static int vmw_cmd_set_shader(struct vmw_private *dev_priv, |
1986 | struct vmw_sw_context *sw_context, |
			      SVGA3dCmdHeader *header)
1988 | { |
1989 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader); |
1990 | struct vmw_ctx_bindinfo_shader binding; |
1991 | struct vmw_resource *ctx, *res = NULL; |
1992 | struct vmw_ctx_validation_info *ctx_info; |
1993 | int ret; |
1994 | |
1995 | cmd = container_of(header, typeof(*cmd), header); |
1996 | |
	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
2000 | return -EINVAL; |
2001 | } |
2002 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
2006 | if (unlikely(ret != 0)) |
2007 | return ret; |
2008 | |
2009 | if (!dev_priv->has_mob) |
2010 | return 0; |
2011 | |
2012 | if (cmd->body.shid != SVGA3D_INVALID_ID) { |
2013 | /* |
2014 | * This is the compat shader path - Per device guest-backed |
2015 | * shaders, but user-space thinks it's per context host- |
2016 | * backed shaders. |
2017 | */ |
2018 | res = vmw_shader_lookup(man: vmw_context_res_man(ctx), |
2019 | user_key: cmd->body.shid, shader_type: cmd->body.type); |
2020 | if (!IS_ERR(ptr: res)) { |
2021 | ret = vmw_execbuf_res_val_add(sw_context, res, |
2022 | VMW_RES_DIRTY_NONE, |
2023 | flags: vmw_val_add_flag_noctx); |
2024 | if (unlikely(ret != 0)) |
2025 | return ret; |
2026 | |
			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
2032 | if (unlikely(ret != 0)) |
2033 | return ret; |
2034 | } |
2035 | } |
2036 | |
	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
2042 | if (unlikely(ret != 0)) |
2043 | return ret; |
2044 | } |
2045 | |
	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2047 | if (!ctx_info) |
2048 | return -EINVAL; |
2049 | |
2050 | binding.bi.ctx = ctx; |
2051 | binding.bi.res = res; |
2052 | binding.bi.bt = vmw_ctx_binding_shader; |
2053 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2055 | |
2056 | return 0; |
2057 | } |
2058 | |
2059 | /** |
2060 | * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command |
2061 | * |
2062 | * @dev_priv: Pointer to a device private struct. |
2063 | * @sw_context: The software context being used for this batch. |
2064 | * @header: Pointer to the command header in the command stream. |
2065 | */ |
2066 | static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, |
2067 | struct vmw_sw_context *sw_context, |
				    SVGA3dCmdHeader *header)
2069 | { |
2070 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst); |
2071 | int ret; |
2072 | |
2073 | cmd = container_of(header, typeof(*cmd), header); |
2074 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
2078 | if (unlikely(ret != 0)) |
2079 | return ret; |
2080 | |
2081 | if (dev_priv->has_mob) |
2082 | header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; |
2083 | |
2084 | return 0; |
2085 | } |
2086 | |
2087 | /** |
2088 | * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command |
2089 | * |
2090 | * @dev_priv: Pointer to a device private struct. |
2091 | * @sw_context: The software context being used for this batch. |
2092 | * @header: Pointer to the command header in the command stream. |
2093 | */ |
2094 | static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, |
2095 | struct vmw_sw_context *sw_context, |
				  SVGA3dCmdHeader *header)
2097 | { |
2098 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) = |
2099 | container_of(header, typeof(*cmd), header); |
2100 | |
	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
2104 | } |
2105 | |
2106 | /** |
2107 | * vmw_cmd_dx_set_single_constant_buffer - Validate |
2108 | * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. |
2109 | * |
2110 | * @dev_priv: Pointer to a device private struct. |
2111 | * @sw_context: The software context being used for this batch. |
2112 | * @header: Pointer to the command header in the command stream. |
2113 | */ |
2114 | static int |
2115 | vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, |
2116 | struct vmw_sw_context *sw_context, |
				      SVGA3dCmdHeader *header)
2118 | { |
2119 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); |
2120 | |
2121 | struct vmw_resource *res = NULL; |
2122 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2123 | struct vmw_ctx_bindinfo_cb binding; |
2124 | int ret; |
2125 | |
2126 | if (!ctx_node) |
2127 | return -EINVAL; |
2128 | |
2129 | cmd = container_of(header, typeof(*cmd), header); |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
2133 | if (unlikely(ret != 0)) |
2134 | return ret; |
2135 | |
	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2139 | (unsigned int) cmd->body.type, |
2140 | (unsigned int) cmd->body.slot); |
2141 | return -EINVAL; |
2142 | } |
2143 | |
2144 | binding.bi.ctx = ctx_node->ctx; |
2145 | binding.bi.res = res; |
2146 | binding.bi.bt = vmw_ctx_binding_cb; |
2147 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
2148 | binding.offset = cmd->body.offsetInBytes; |
2149 | binding.size = cmd->body.sizeInBytes; |
2150 | binding.slot = cmd->body.slot; |
2151 | |
	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);
2154 | |
2155 | return 0; |
2156 | } |
2157 | |
2158 | /** |
2159 | * vmw_cmd_dx_set_constant_buffer_offset - Validate |
2160 | * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command. |
2161 | * |
2162 | * @dev_priv: Pointer to a device private struct. |
2163 | * @sw_context: The software context being used for this batch. |
2164 | * @header: Pointer to the command header in the command stream. |
2165 | */ |
2166 | static int |
2167 | vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv, |
2168 | struct vmw_sw_context *sw_context, |
				      SVGA3dCmdHeader *header)
2170 | { |
2171 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset); |
2172 | |
2173 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2174 | u32 shader_slot; |
2175 | |
2176 | if (!has_sm5_context(dev_priv)) |
2177 | return -EINVAL; |
2178 | |
2179 | if (!ctx_node) |
2180 | return -EINVAL; |
2181 | |
2182 | cmd = container_of(header, typeof(*cmd), header); |
2183 | if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { |
2184 | VMW_DEBUG_USER("Illegal const buffer slot %u.\n" , |
2185 | (unsigned int) cmd->body.slot); |
2186 | return -EINVAL; |
2187 | } |
2188 | |
2189 | shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET; |
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);
2192 | |
2193 | return 0; |
2194 | } |
2195 | |
2196 | /** |
2197 | * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES |
2198 | * command |
2199 | * |
2200 | * @dev_priv: Pointer to a device private struct. |
2201 | * @sw_context: The software context being used for this batch. |
2202 | * @header: Pointer to the command header in the command stream. |
2203 | */ |
2204 | static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, |
2205 | struct vmw_sw_context *sw_context, |
				     SVGA3dCmdHeader *header)
2207 | { |
2208 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = |
2209 | container_of(header, typeof(*cmd), header); |
2210 | |
2211 | u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / |
2212 | sizeof(SVGA3dShaderResourceViewId); |
2213 | |
2214 | if ((u64) cmd->body.startView + (u64) num_sr_view > |
2215 | (u64) SVGA3D_DX_MAX_SRVIEWS || |
	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
2218 | return -EINVAL; |
2219 | } |
2220 | |
	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
2226 | } |
2227 | |
2228 | /** |
2229 | * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command |
2230 | * |
2231 | * @dev_priv: Pointer to a device private struct. |
2232 | * @sw_context: The software context being used for this batch. |
2233 | * @header: Pointer to the command header in the command stream. |
2234 | */ |
2235 | static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, |
2236 | struct vmw_sw_context *sw_context, |
				 SVGA3dCmdHeader *header)
2238 | { |
2239 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); |
2240 | struct vmw_resource *res = NULL; |
2241 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2242 | struct vmw_ctx_bindinfo_shader binding; |
2243 | int ret = 0; |
2244 | |
2245 | if (!ctx_node) |
2246 | return -EINVAL; |
2247 | |
2248 | cmd = container_of(header, typeof(*cmd), header); |
2249 | |
	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
2253 | return -EINVAL; |
2254 | } |
2255 | |
2256 | if (cmd->body.shaderId != SVGA3D_INVALID_ID) { |
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
2261 | } |
2262 | |
		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
2266 | if (ret) |
2267 | return ret; |
2268 | } |
2269 | |
2270 | binding.bi.ctx = ctx_node->ctx; |
2271 | binding.bi.res = res; |
2272 | binding.bi.bt = vmw_ctx_binding_dx_shader; |
2273 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
2274 | |
	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2276 | |
2277 | return 0; |
2278 | } |
2279 | |
2280 | /** |
2281 | * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS |
2282 | * command |
2283 | * |
2284 | * @dev_priv: Pointer to a device private struct. |
2285 | * @sw_context: The software context being used for this batch. |
2286 | * @header: Pointer to the command header in the command stream. |
2287 | */ |
2288 | static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, |
2289 | struct vmw_sw_context *sw_context, |
					 SVGA3dCmdHeader *header)
2291 | { |
2292 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2293 | struct vmw_ctx_bindinfo_vb binding; |
2294 | struct vmw_resource *res; |
2295 | struct { |
		SVGA3dCmdHeader header;
2297 | SVGA3dCmdDXSetVertexBuffers body; |
2298 | SVGA3dVertexBuffer buf[]; |
2299 | } *cmd; |
2300 | int i, ret, num; |
2301 | |
2302 | if (!ctx_node) |
2303 | return -EINVAL; |
2304 | |
2305 | cmd = container_of(header, typeof(*cmd), header); |
2306 | num = (cmd->header.size - sizeof(cmd->body)) / |
2307 | sizeof(SVGA3dVertexBuffer); |
2308 | if ((u64)num + (u64)cmd->body.startBuffer > |
2309 | (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { |
2310 | VMW_DEBUG_USER("Invalid number of vertex buffers.\n" ); |
2311 | return -EINVAL; |
2312 | } |
2313 | |
2314 | for (i = 0; i < num; i++) { |
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
2319 | if (unlikely(ret != 0)) |
2320 | return ret; |
2321 | |
2322 | binding.bi.ctx = ctx_node->ctx; |
2323 | binding.bi.bt = vmw_ctx_binding_vb; |
2324 | binding.bi.res = res; |
2325 | binding.offset = cmd->buf[i].offset; |
2326 | binding.stride = cmd->buf[i].stride; |
2327 | binding.slot = i + cmd->body.startBuffer; |
2328 | |
		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2330 | } |
2331 | |
2332 | return 0; |
2333 | } |
2334 | |
2335 | /** |
2336 | * vmw_cmd_dx_set_index_buffer - Validate |
2337 | * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. |
2338 | * |
2339 | * @dev_priv: Pointer to a device private struct. |
2340 | * @sw_context: The software context being used for this batch. |
2341 | * @header: Pointer to the command header in the command stream. |
2342 | */ |
2343 | static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, |
2344 | struct vmw_sw_context *sw_context, |
				       SVGA3dCmdHeader *header)
2346 | { |
2347 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2348 | struct vmw_ctx_bindinfo_ib binding; |
2349 | struct vmw_resource *res; |
2350 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer); |
2351 | int ret; |
2352 | |
2353 | if (!ctx_node) |
2354 | return -EINVAL; |
2355 | |
2356 | cmd = container_of(header, typeof(*cmd), header); |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
2360 | if (unlikely(ret != 0)) |
2361 | return ret; |
2362 | |
2363 | binding.bi.ctx = ctx_node->ctx; |
2364 | binding.bi.res = res; |
2365 | binding.bi.bt = vmw_ctx_binding_ib; |
2366 | binding.offset = cmd->body.offset; |
2367 | binding.format = cmd->body.format; |
2368 | |
	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2370 | |
2371 | return 0; |
2372 | } |
2373 | |
2374 | /** |
2375 | * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS |
2376 | * command |
2377 | * |
2378 | * @dev_priv: Pointer to a device private struct. |
2379 | * @sw_context: The software context being used for this batch. |
2380 | * @header: Pointer to the command header in the command stream. |
2381 | */ |
2382 | static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, |
2383 | struct vmw_sw_context *sw_context, |
					SVGA3dCmdHeader *header)
2385 | { |
2386 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) = |
2387 | container_of(header, typeof(*cmd), header); |
2388 | u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / |
2389 | sizeof(SVGA3dRenderTargetViewId); |
2390 | int ret; |
2391 | |
2392 | if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) { |
2393 | VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n" ); |
2394 | return -EINVAL; |
2395 | } |
2396 | |
	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
2399 | if (ret) |
2400 | return ret; |
2401 | |
	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
2405 | } |
2406 | |
2407 | /** |
2408 | * vmw_cmd_dx_clear_rendertarget_view - Validate |
2409 | * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command |
2410 | * |
2411 | * @dev_priv: Pointer to a device private struct. |
2412 | * @sw_context: The software context being used for this batch. |
2413 | * @header: Pointer to the command header in the command stream. |
2414 | */ |
2415 | static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, |
2416 | struct vmw_sw_context *sw_context, |
					      SVGA3dCmdHeader *header)
2418 | { |
2419 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) = |
2420 | container_of(header, typeof(*cmd), header); |
2421 | struct vmw_resource *ret; |
2422 | |
	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
2427 | } |
2428 | |
2429 | /** |
2430 | * vmw_cmd_dx_clear_depthstencil_view - Validate |
2431 | * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command |
2432 | * |
2433 | * @dev_priv: Pointer to a device private struct. |
2434 | * @sw_context: The software context being used for this batch. |
2435 | * @header: Pointer to the command header in the command stream. |
2436 | */ |
2437 | static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, |
2438 | struct vmw_sw_context *sw_context, |
					      SVGA3dCmdHeader *header)
2440 | { |
2441 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) = |
2442 | container_of(header, typeof(*cmd), header); |
2443 | struct vmw_resource *ret; |
2444 | |
	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
2449 | } |
2450 | |
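/**
 * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_XX_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */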
2451 | static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, |
2452 | struct vmw_sw_context *sw_context, |
				  SVGA3dCmdHeader *header)
2454 | { |
2455 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2456 | struct vmw_resource *srf; |
2457 | struct vmw_resource *res; |
2458 | enum vmw_view_type view_type; |
2459 | int ret; |
2460 | /* |
2461 | * This is based on the fact that all affected define commands have the |
2462 | * same initial command body layout. |
2463 | */ |
2464 | struct { |
		SVGA3dCmdHeader header;
2466 | uint32 defined_id; |
2467 | uint32 sid; |
2468 | } *cmd; |
2469 | |
2470 | if (!ctx_node) |
2471 | return -EINVAL; |
2472 | |
	view_type = vmw_view_cmd_to_type(header->id);
2474 | if (view_type == vmw_view_max) |
2475 | return -EINVAL; |
2476 | |
2477 | cmd = container_of(header, typeof(*cmd), header); |
2478 | if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { |
2479 | VMW_DEBUG_USER("Invalid surface id.\n" ); |
2480 | return -EINVAL; |
2481 | } |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
2485 | if (unlikely(ret != 0)) |
2486 | return ret; |
2487 | |
	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->defined_id);
2492 | if (unlikely(ret != 0)) |
2493 | return ret; |
2494 | |
	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
2499 | } |
2500 | |
2501 | /** |
2502 | * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command. |
2503 | * |
2504 | * @dev_priv: Pointer to a device private struct. |
2505 | * @sw_context: The software context being used for this batch. |
2506 | * @header: Pointer to the command header in the command stream. |
2507 | */ |
2508 | static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, |
2509 | struct vmw_sw_context *sw_context, |
				     SVGA3dCmdHeader *header)
2511 | { |
2512 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2513 | struct vmw_ctx_bindinfo_so_target binding; |
2514 | struct vmw_resource *res; |
2515 | struct { |
		SVGA3dCmdHeader header;
2517 | SVGA3dCmdDXSetSOTargets body; |
2518 | SVGA3dSoTarget targets[]; |
2519 | } *cmd; |
2520 | int i, ret, num; |
2521 | |
2522 | if (!ctx_node) |
2523 | return -EINVAL; |
2524 | |
2525 | cmd = container_of(header, typeof(*cmd), header); |
2526 | num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget); |
2527 | |
2528 | if (num > SVGA3D_DX_MAX_SOTARGETS) { |
2529 | VMW_DEBUG_USER("Invalid DX SO binding.\n" ); |
2530 | return -EINVAL; |
2531 | } |
2532 | |
2533 | for (i = 0; i < num; i++) { |
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
2538 | if (unlikely(ret != 0)) |
2539 | return ret; |
2540 | |
2541 | binding.bi.ctx = ctx_node->ctx; |
2542 | binding.bi.res = res; |
2543 | binding.bi.bt = vmw_ctx_binding_so_target; |
2544 | binding.offset = cmd->targets[i].offset; |
2545 | binding.size = cmd->targets[i].sizeInBytes; |
2546 | binding.slot = i; |
2547 | |
		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2549 | } |
2550 | |
2551 | return 0; |
2552 | } |
2553 | |
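/**
 * vmw_cmd_dx_so_define - Validate an SVGA_3D_CMD_DX_DEFINE_XX state object
 * define command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */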
2554 | static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, |
2555 | struct vmw_sw_context *sw_context, |
				SVGA3dCmdHeader *header)
2557 | { |
2558 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2559 | struct vmw_resource *res; |
2560 | /* |
2561 | * This is based on the fact that all affected define commands have |
2562 | * the same initial command body layout. |
2563 | */ |
2564 | struct { |
		SVGA3dCmdHeader header;
2566 | uint32 defined_id; |
2567 | } *cmd; |
2568 | enum vmw_so_type so_type; |
2569 | int ret; |
2570 | |
2571 | if (!ctx_node) |
2572 | return -EINVAL; |
2573 | |
	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
2580 | |
2581 | return ret; |
2582 | } |
2583 | |
2584 | /** |
2585 | * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE |
2586 | * command |
2587 | * |
2588 | * @dev_priv: Pointer to a device private struct. |
2589 | * @sw_context: The software context being used for this batch. |
2590 | * @header: Pointer to the command header in the command stream. |
2591 | */ |
2592 | static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, |
2593 | struct vmw_sw_context *sw_context, |
					SVGA3dCmdHeader *header)
2595 | { |
2596 | struct { |
		SVGA3dCmdHeader header;
2598 | union { |
2599 | SVGA3dCmdDXReadbackSubResource r_body; |
2600 | SVGA3dCmdDXInvalidateSubResource i_body; |
2601 | SVGA3dCmdDXUpdateSubResource u_body; |
2602 | SVGA3dSurfaceId sid; |
2603 | }; |
2604 | } *cmd; |
2605 | |
2606 | BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != |
2607 | offsetof(typeof(*cmd), sid)); |
2608 | BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != |
2609 | offsetof(typeof(*cmd), sid)); |
2610 | BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != |
2611 | offsetof(typeof(*cmd), sid)); |
2612 | |
2613 | cmd = container_of(header, typeof(*cmd), header); |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
2617 | } |
2618 | |
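/**
 * vmw_cmd_dx_cid_check - Validate a command that requires only a DX context
 * to be set.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */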
2619 | static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, |
2620 | struct vmw_sw_context *sw_context, |
				SVGA3dCmdHeader *header)
2622 | { |
2623 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2624 | |
2625 | if (!ctx_node) |
2626 | return -EINVAL; |
2627 | |
2628 | return 0; |
2629 | } |
2630 | |
2631 | /** |
2632 | * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view |
2633 | * resource for removal. |
2634 | * |
2635 | * @dev_priv: Pointer to a device private struct. |
2636 | * @sw_context: The software context being used for this batch. |
2637 | * @header: Pointer to the command header in the command stream. |
2638 | * |
2639 | * Check that the view exists, and if it was not created using this command |
2640 | * batch, conditionally make this command a NOP. |
2641 | */ |
2642 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, |
2643 | struct vmw_sw_context *sw_context, |
				  SVGA3dCmdHeader *header)
2645 | { |
2646 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2647 | struct { |
		SVGA3dCmdHeader header;
2649 | union vmw_view_destroy body; |
2650 | } *cmd = container_of(header, typeof(*cmd), header); |
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2652 | struct vmw_resource *view; |
2653 | int ret; |
2654 | |
2655 | if (!ctx_node) |
2656 | return -EINVAL; |
2657 | |
	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
2660 | if (ret || !view) |
2661 | return ret; |
2662 | |
2663 | /* |
2664 | * If the view wasn't created during this command batch, it might |
2665 | * have been removed due to a context swapout, so add a |
2666 | * relocation to conditionally make this command a NOP to avoid |
2667 | * device errors. |
2668 | */ |
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
2673 | } |
2674 | |
2675 | /** |
2676 | * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command |
2677 | * |
2678 | * @dev_priv: Pointer to a device private struct. |
2679 | * @sw_context: The software context being used for this batch. |
2680 | * @header: Pointer to the command header in the command stream. |
2681 | */ |
2682 | static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, |
2683 | struct vmw_sw_context *sw_context, |
				    SVGA3dCmdHeader *header)
2685 | { |
2686 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2687 | struct vmw_resource *res; |
2688 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) = |
2689 | container_of(header, typeof(*cmd), header); |
2690 | int ret; |
2691 | |
2692 | if (!ctx_node) |
2693 | return -EINVAL; |
2694 | |
	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2699 | if (ret) |
2700 | return ret; |
2701 | |
	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
2705 | } |
2706 | |
2707 | /** |
2708 | * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command |
2709 | * |
2710 | * @dev_priv: Pointer to a device private struct. |
2711 | * @sw_context: The software context being used for this batch. |
2712 | * @header: Pointer to the command header in the command stream. |
2713 | */ |
2714 | static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, |
2715 | struct vmw_sw_context *sw_context, |
				     SVGA3dCmdHeader *header)
2717 | { |
2718 | struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); |
2719 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) = |
2720 | container_of(header, typeof(*cmd), header); |
2721 | int ret; |
2722 | |
2723 | if (!ctx_node) |
2724 | return -EINVAL; |
2725 | |
	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
2728 | |
2729 | return ret; |
2730 | } |
2731 | |
2732 | /** |
2733 | * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command |
2734 | * |
2735 | * @dev_priv: Pointer to a device private struct. |
2736 | * @sw_context: The software context being used for this batch. |
2737 | * @header: Pointer to the command header in the command stream. |
2738 | */ |
2739 | static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, |
2740 | struct vmw_sw_context *sw_context, |
				  SVGA3dCmdHeader *header)
2742 | { |
2743 | struct vmw_resource *ctx; |
2744 | struct vmw_resource *res; |
2745 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) = |
2746 | container_of(header, typeof(*cmd), header); |
2747 | int ret; |
2748 | |
2749 | if (cmd->body.cid != SVGA3D_INVALID_ID) { |
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
2754 | if (ret) |
2755 | return ret; |
2756 | } else { |
2757 | struct vmw_ctx_validation_info *ctx_node = |
2758 | VMW_GET_CTX_NODE(sw_context); |
2759 | |
2760 | if (!ctx_node) |
2761 | return -EINVAL; |
2762 | |
2763 | ctx = ctx_node->ctx; |
2764 | } |
2765 | |
	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
2770 | } |
2771 | |
	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
2776 | return ret; |
2777 | } |
2778 | |
	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
2782 | } |
2783 | |
2784 | /** |
2785 | * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command |
2786 | * |
2787 | * @dev_priv: Pointer to a device private struct. |
2788 | * @sw_context: The software context being used for this batch. |
2789 | * @header: Pointer to the command header in the command stream. |
2790 | */ |
2791 | static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, |
2792 | struct vmw_sw_context *sw_context, |
			      SVGA3dCmdHeader *header)
2794 | { |
2795 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) = |
2796 | container_of(header, typeof(*cmd), header); |
2797 | struct vmw_resource *view; |
2798 | struct vmw_res_cache_entry *rcache; |
2799 | |
	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);
2804 | |
2805 | /* |
2806 | * Normally the shader-resource view is not gpu-dirtying, but for |
2807 | * this particular command it is... |
2808 | * So mark the last looked-up surface, which is the surface |
2809 | * the view points to, gpu-dirty. |
2810 | */ |
2811 | rcache = &sw_context->res_cache[vmw_res_surface]; |
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);
2814 | return 0; |
2815 | } |
2816 | |
2817 | /** |
2818 | * vmw_cmd_dx_transfer_from_buffer - Validate |
2819 | * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command |
2820 | * |
2821 | * @dev_priv: Pointer to a device private struct. |
2822 | * @sw_context: The software context being used for this batch. |
2823 | * @header: Pointer to the command header in the command stream. |
2824 | */ |
2825 | static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, |
2826 | struct vmw_sw_context *sw_context, |
					   SVGA3dCmdHeader *header)
2828 | { |
2829 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) = |
2830 | container_of(header, typeof(*cmd), header); |
2831 | int ret; |
2832 | |
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
2836 | if (ret != 0) |
2837 | return ret; |
2838 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
2842 | } |
2843 | |
2844 | /** |
2845 | * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command |
2846 | * |
2847 | * @dev_priv: Pointer to a device private struct. |
2848 | * @sw_context: The software context being used for this batch. |
2849 | * @header: Pointer to the command header in the command stream. |
2850 | */ |
2851 | static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, |
2852 | struct vmw_sw_context *sw_context, |
				      SVGA3dCmdHeader *header)
2854 | { |
2855 | VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) = |
2856 | container_of(header, typeof(*cmd), header); |
2857 | |
2858 | if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)) |
2859 | return -EINVAL; |
2860 | |
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
2864 | } |
2865 | |
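/**
 * vmw_cmd_sm5 - Validate commands that require SM5 support but need no
 * further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */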
2866 | static int vmw_cmd_sm5(struct vmw_private *dev_priv, |
2867 | struct vmw_sw_context *sw_context, |
		       SVGA3dCmdHeader *header)
2869 | { |
2870 | if (!has_sm5_context(dev_priv)) |
2871 | return -EINVAL; |
2872 | |
2873 | return 0; |
2874 | } |
2875 | |
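/**
 * vmw_cmd_sm5_view_define - Validate an SM5 view define command by checking
 * for SM5 support before handing over to vmw_cmd_dx_view_define().
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */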
2876 | static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv, |
2877 | struct vmw_sw_context *sw_context, |
				   SVGA3dCmdHeader *header)
2879 | { |
2880 | if (!has_sm5_context(dev_priv)) |
2881 | return -EINVAL; |
2882 | |
2883 | return vmw_cmd_dx_view_define(dev_priv, sw_context, header); |
2884 | } |
2885 | |
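/**
 * vmw_cmd_sm5_view_remove - Validate an SM5 view remove command by checking
 * for SM5 support before handing over to vmw_cmd_dx_view_remove().
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */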
2886 | static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv, |
2887 | struct vmw_sw_context *sw_context, |
				   SVGA3dCmdHeader *header)
2889 | { |
2890 | if (!has_sm5_context(dev_priv)) |
2891 | return -EINVAL; |
2892 | |
2893 | return vmw_cmd_dx_view_remove(dev_priv, sw_context, header); |
2894 | } |
2895 | |
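/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */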
2896 | static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv, |
2897 | struct vmw_sw_context *sw_context, |
				  SVGA3dCmdHeader *header)
2899 | { |
2900 | struct { |
		SVGA3dCmdHeader header;
2902 | SVGA3dCmdDXClearUAViewUint body; |
2903 | } *cmd = container_of(header, typeof(*cmd), header); |
2904 | struct vmw_resource *ret; |
2905 | |
2906 | if (!has_sm5_context(dev_priv)) |
2907 | return -EINVAL; |
2908 | |
	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
2913 | } |
2914 | |
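/**
 * vmw_cmd_clear_uav_float - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */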
2915 | static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv, |
2916 | struct vmw_sw_context *sw_context, |
				   SVGA3dCmdHeader *header)
2918 | { |
2919 | struct { |
		SVGA3dCmdHeader header;
2921 | SVGA3dCmdDXClearUAViewFloat body; |
2922 | } *cmd = container_of(header, typeof(*cmd), header); |
2923 | struct vmw_resource *ret; |
2924 | |
2925 | if (!has_sm5_context(dev_priv)) |
2926 | return -EINVAL; |
2927 | |
	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
2932 | } |
2933 | |
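/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */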
2934 | static int vmw_cmd_set_uav(struct vmw_private *dev_priv, |
2935 | struct vmw_sw_context *sw_context, |
			   SVGA3dCmdHeader *header)
2937 | { |
2938 | struct { |
		SVGA3dCmdHeader header;
2940 | SVGA3dCmdDXSetUAViews body; |
2941 | } *cmd = container_of(header, typeof(*cmd), header); |
2942 | u32 num_uav = (cmd->header.size - sizeof(cmd->body)) / |
2943 | sizeof(SVGA3dUAViewId); |
2944 | int ret; |
2945 | |
2946 | if (!has_sm5_context(dev_priv)) |
2947 | return -EINVAL; |
2948 | |
2949 | if (num_uav > vmw_max_num_uavs(dev_priv)) { |
2950 | VMW_DEBUG_USER("Invalid UAV binding.\n" ); |
2951 | return -EINVAL; |
2952 | } |
2953 | |
	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
2957 | if (ret) |
2958 | return ret; |
2959 | |
	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);
2962 | |
2963 | return ret; |
2964 | } |
2965 | |
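/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */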
2966 | static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv, |
2967 | struct vmw_sw_context *sw_context, |
			      SVGA3dCmdHeader *header)
2969 | { |
2970 | struct { |
		SVGA3dCmdHeader header;
2972 | SVGA3dCmdDXSetCSUAViews body; |
2973 | } *cmd = container_of(header, typeof(*cmd), header); |
2974 | u32 num_uav = (cmd->header.size - sizeof(cmd->body)) / |
2975 | sizeof(SVGA3dUAViewId); |
2976 | int ret; |
2977 | |
2978 | if (!has_sm5_context(dev_priv)) |
2979 | return -EINVAL; |
2980 | |
2981 | if (num_uav > vmw_max_num_uavs(dev_priv)) { |
2982 | VMW_DEBUG_USER("Invalid UAV binding.\n" ); |
2983 | return -EINVAL; |
2984 | } |
2985 | |
	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
2989 | if (ret) |
2990 | return ret; |
2991 | |
	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);
2994 | |
2995 | return ret; |
2996 | } |
2997 | |
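/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */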
2998 | static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv, |
2999 | struct vmw_sw_context *sw_context, |
					  SVGA3dCmdHeader *header)
3001 | { |
3002 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
3003 | struct vmw_resource *res; |
3004 | struct { |
		SVGA3dCmdHeader header;
3006 | SVGA3dCmdDXDefineStreamOutputWithMob body; |
3007 | } *cmd = container_of(header, typeof(*cmd), header); |
3008 | int ret; |
3009 | |
3010 | if (!has_sm5_context(dev_priv)) |
3011 | return -EINVAL; |
3012 | |
3013 | if (!ctx_node) { |
3014 | DRM_ERROR("DX Context not set.\n" ); |
3015 | return -EINVAL; |
3016 | } |
3017 | |
	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->body.soid);
3022 | if (ret) |
3023 | return ret; |
3024 | |
3025 | return vmw_dx_streamoutput_add(man: sw_context->man, ctx: ctx_node->ctx, |
3026 | user_key: cmd->body.soid, |
3027 | list: &sw_context->staged_cmd_res); |
3028 | } |
3029 | |
3030 | static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv, |
3031 | struct vmw_sw_context *sw_context, |
3032 | SVGA3dCmdHeader *) |
3033 | { |
3034 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
3035 | struct vmw_resource *res; |
3036 | struct { |
3037 | SVGA3dCmdHeader ; |
3038 | SVGA3dCmdDXDestroyStreamOutput body; |
3039 | } *cmd = container_of(header, typeof(*cmd), header); |
3040 | |
3041 | if (!ctx_node) { |
3042 | DRM_ERROR("DX Context not set.\n" ); |
3043 | return -EINVAL; |
3044 | } |
3045 | |
3046 | /* |
3047 | * When device does not support SM5 then streamoutput with mob command is |
3048 | * not available to user-space. Simply return in this case. |
3049 | */ |
3050 | if (!has_sm5_context(dev_priv)) |
3051 | return 0; |
3052 | |
3053 | /* |
3054 | * With SM5 capable device if lookup fails then user-space probably used |
3055 | * old streamoutput define command. Return without an error. |
3056 | */ |
3057 | res = vmw_dx_streamoutput_lookup(man: vmw_context_res_man(ctx: ctx_node->ctx), |
3058 | user_key: cmd->body.soid); |
3059 | if (IS_ERR(ptr: res)) |
3060 | return 0; |
3061 | |
3062 | return vmw_dx_streamoutput_remove(man: sw_context->man, user_key: cmd->body.soid, |
3063 | list: &sw_context->staged_cmd_res); |
3064 | } |
3065 | |
3066 | static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv, |
3067 | struct vmw_sw_context *sw_context, |
3068 | SVGA3dCmdHeader *) |
3069 | { |
3070 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
3071 | struct vmw_resource *res; |
3072 | struct { |
3073 | SVGA3dCmdHeader ; |
3074 | SVGA3dCmdDXBindStreamOutput body; |
3075 | } *cmd = container_of(header, typeof(*cmd), header); |
3076 | int ret; |
3077 | |
3078 | if (!has_sm5_context(dev_priv)) |
3079 | return -EINVAL; |
3080 | |
3081 | if (!ctx_node) { |
3082 | DRM_ERROR("DX Context not set.\n" ); |
3083 | return -EINVAL; |
3084 | } |
3085 | |
3086 | res = vmw_dx_streamoutput_lookup(man: vmw_context_res_man(ctx: ctx_node->ctx), |
3087 | user_key: cmd->body.soid); |
3088 | if (IS_ERR(ptr: res)) { |
3089 | DRM_ERROR("Could not find streamoutput to bind.\n" ); |
3090 | return PTR_ERR(ptr: res); |
3091 | } |
3092 | |
3093 | vmw_dx_streamoutput_set_size(res, size: cmd->body.sizeInBytes); |
3094 | |
3095 | ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, |
3096 | flags: vmw_val_add_flag_noctx); |
3097 | if (ret) { |
3098 | DRM_ERROR("Error creating resource validation node.\n" ); |
3099 | return ret; |
3100 | } |
3101 | |
3102 | return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, |
3103 | buf_id: &cmd->body.mobid, |
3104 | backup_offset: cmd->body.offsetInBytes); |
3105 | } |
3106 | |
3107 | static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv, |
3108 | struct vmw_sw_context *sw_context, |
3109 | SVGA3dCmdHeader *) |
3110 | { |
3111 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
3112 | struct vmw_resource *res; |
3113 | struct vmw_ctx_bindinfo_so binding; |
3114 | struct { |
3115 | SVGA3dCmdHeader ; |
3116 | SVGA3dCmdDXSetStreamOutput body; |
3117 | } *cmd = container_of(header, typeof(*cmd), header); |
3118 | int ret; |
3119 | |
3120 | if (!ctx_node) { |
3121 | DRM_ERROR("DX Context not set.\n" ); |
3122 | return -EINVAL; |
3123 | } |
3124 | |
3125 | if (cmd->body.soid == SVGA3D_INVALID_ID) |
3126 | return 0; |
3127 | |
3128 | /* |
3129 | * When device does not support SM5 then streamoutput with mob command is |
3130 | * not available to user-space. Simply return in this case. |
3131 | */ |
3132 | if (!has_sm5_context(dev_priv)) |
3133 | return 0; |
3134 | |
3135 | /* |
3136 | * With SM5 capable device if lookup fails then user-space probably used |
3137 | * old streamoutput define command. Return without an error. |
3138 | */ |
3139 | res = vmw_dx_streamoutput_lookup(man: vmw_context_res_man(ctx: ctx_node->ctx), |
3140 | user_key: cmd->body.soid); |
3141 | if (IS_ERR(ptr: res)) { |
3142 | return 0; |
3143 | } |
3144 | |
3145 | ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, |
3146 | flags: vmw_val_add_flag_noctx); |
3147 | if (ret) { |
3148 | DRM_ERROR("Error creating resource validation node.\n" ); |
3149 | return ret; |
3150 | } |
3151 | |
3152 | binding.bi.ctx = ctx_node->ctx; |
3153 | binding.bi.res = res; |
3154 | binding.bi.bt = vmw_ctx_binding_so; |
3155 | binding.slot = 0; /* Only one SO set to context at a time. */ |
3156 | |
3157 | vmw_binding_add(cbs: sw_context->dx_ctx_node->staged, ci: &binding.bi, shader_slot: 0, |
3158 | slot: binding.slot); |
3159 | |
3160 | return ret; |
3161 | } |
3162 | |
3163 | static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv, |
3164 | struct vmw_sw_context *sw_context, |
3165 | SVGA3dCmdHeader *) |
3166 | { |
3167 | struct vmw_draw_indexed_instanced_indirect_cmd { |
3168 | SVGA3dCmdHeader ; |
3169 | SVGA3dCmdDXDrawIndexedInstancedIndirect body; |
3170 | } *cmd = container_of(header, typeof(*cmd), header); |
3171 | |
3172 | if (!has_sm5_context(dev_priv)) |
3173 | return -EINVAL; |
3174 | |
3175 | return vmw_cmd_res_check(dev_priv, sw_context, res_type: vmw_res_surface, |
3176 | VMW_RES_DIRTY_NONE, converter: user_surface_converter, |
3177 | id_loc: &cmd->body.argsBufferSid, NULL); |
3178 | } |
3179 | |
3180 | static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv, |
3181 | struct vmw_sw_context *sw_context, |
3182 | SVGA3dCmdHeader *) |
3183 | { |
3184 | struct vmw_draw_instanced_indirect_cmd { |
3185 | SVGA3dCmdHeader ; |
3186 | SVGA3dCmdDXDrawInstancedIndirect body; |
3187 | } *cmd = container_of(header, typeof(*cmd), header); |
3188 | |
3189 | if (!has_sm5_context(dev_priv)) |
3190 | return -EINVAL; |
3191 | |
3192 | return vmw_cmd_res_check(dev_priv, sw_context, res_type: vmw_res_surface, |
3193 | VMW_RES_DIRTY_NONE, converter: user_surface_converter, |
3194 | id_loc: &cmd->body.argsBufferSid, NULL); |
3195 | } |
3196 | |
3197 | static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv, |
3198 | struct vmw_sw_context *sw_context, |
3199 | SVGA3dCmdHeader *) |
3200 | { |
3201 | struct vmw_dispatch_indirect_cmd { |
3202 | SVGA3dCmdHeader ; |
3203 | SVGA3dCmdDXDispatchIndirect body; |
3204 | } *cmd = container_of(header, typeof(*cmd), header); |
3205 | |
3206 | if (!has_sm5_context(dev_priv)) |
3207 | return -EINVAL; |
3208 | |
3209 | return vmw_cmd_res_check(dev_priv, sw_context, res_type: vmw_res_surface, |
3210 | VMW_RES_DIRTY_NONE, converter: user_surface_converter, |
3211 | id_loc: &cmd->body.argsBufferSid, NULL); |
3212 | } |
3213 | |
3214 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
3215 | struct vmw_sw_context *sw_context, |
3216 | void *buf, uint32_t *size) |
3217 | { |
3218 | uint32_t size_remaining = *size; |
3219 | uint32_t cmd_id; |
3220 | |
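	/*
	 * Legacy (non-3D) SVGA FIFO commands are a 32-bit command id
	 * followed by a fixed-size payload, so the total size can be
	 * derived from the id alone.
	 */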
	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

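/*
 * Dispatch table for SVGA3D commands. The three booleans following each
 * verifier function are user_allow, gb_disable and gb_enable: whether
 * user-space may issue the command, whether it is disallowed on
 * guest-backed devices, and whether it requires a guest-backed device.
 * They are checked in vmw_cmd_check() below.
 */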
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
		    &vmw_cmd_invalid, false, false, true),
};

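/**
 * vmw_cmd_describe - Look up the name and size of the command at @buf.
 *
 * @buf: Pointer to the start of the command.
 * @size: Out parameter: total command size in bytes.
 * @cmd: Out parameter: static name string for the command.
 *
 * Returns true if the command was recognized. Illustrative use only; the
 * caller and names below are hypothetical:
 *
 *	const char *name;
 *	u32 sz;
 *
 *	if (vmw_cmd_describe(buf, &sz, &name))
 *		pr_debug("cmd %s: %u bytes\n", name, sz);
 */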
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands. */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

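	/*
	 * 3D commands carry an SVGA3dCmdHeader (id and body size); the
	 * total size reported back to the caller includes the header
	 * itself.
	 */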
	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

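	/*
	 * Walk the whole batch; vmw_cmd_check() both verifies each command
	 * and returns its size, which is used to advance to the next one.
	 */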
	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it. */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

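	/*
	 * All buffers now have a final placement, so patch the command
	 * stream accordingly: VRAM placements become offsets into the
	 * framebuffer GMR, while GMR and MOB placements supply the id to
	 * write back.
	 */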
	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->tbo;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

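	/*
	 * Grow by roughly 1.5x, page aligned, until the requested size
	 * fits; this avoids a fresh allocation for every slightly larger
	 * submission.
	 */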
	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a
 * user-space handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user-space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference
	 * the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-side copy of the commands, may be
 * NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that
 * case, the value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

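/**
 * vmw_execbuf_tie_context - Look up and set up the DX context, if any, for
 * this submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context holding the validation state.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if none.
 *
 * Adds the context to the validation list and caches its validation node and
 * command buffer resource manager in @sw_context.
 */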
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

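	/*
	 * In outline: copy in or reserve the command batch, verify every
	 * command, validate all referenced resources and buffers, patch
	 * relocations, submit, and finally fence the submission and report
	 * the fence back to user-space.
	 */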
4105 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { |
4106 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); |
4107 | if (out_fence_fd < 0) { |
4108 | VMW_DEBUG_USER("Failed to get a fence fd.\n" ); |
4109 | return out_fence_fd; |
4110 | } |
4111 | } |
4112 | |
4113 | if (throttle_us) { |
4114 | VMW_DEBUG_USER("Throttling is no longer supported.\n" ); |
4115 | } |
4116 | |
4117 | kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, |
4118 | kernel_commands, command_size, |
4119 | header: &header); |
4120 | if (IS_ERR(ptr: kernel_commands)) { |
4121 | ret = PTR_ERR(ptr: kernel_commands); |
4122 | goto out_free_fence_fd; |
4123 | } |
4124 | |
4125 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); |
4126 | if (ret) { |
4127 | ret = -ERESTARTSYS; |
4128 | goto out_free_header; |
4129 | } |
4130 | |
4131 | sw_context->kernel = false; |
4132 | if (kernel_commands == NULL) { |
4133 | ret = vmw_resize_cmd_bounce(sw_context, size: command_size); |
4134 | if (unlikely(ret != 0)) |
4135 | goto out_unlock; |
4136 | |
4137 | ret = copy_from_user(to: sw_context->cmd_bounce, from: user_commands, |
4138 | n: command_size); |
4139 | if (unlikely(ret != 0)) { |
4140 | ret = -EFAULT; |
4141 | VMW_DEBUG_USER("Failed copying commands.\n" ); |
4142 | goto out_unlock; |
4143 | } |
4144 | |
4145 | kernel_commands = sw_context->cmd_bounce; |
4146 | } else if (!header) { |
4147 | sw_context->kernel = true; |
4148 | } |
4149 | |
4150 | sw_context->filp = file_priv; |
4151 | sw_context->fp = vmw_fpriv(file_priv); |
4152 | INIT_LIST_HEAD(list: &sw_context->ctx_list); |
4153 | sw_context->cur_query_bo = dev_priv->pinned_bo; |
4154 | sw_context->last_query_ctx = NULL; |
4155 | sw_context->needs_post_query_barrier = false; |
4156 | sw_context->dx_ctx_node = NULL; |
4157 | sw_context->dx_query_mob = NULL; |
4158 | sw_context->dx_query_ctx = NULL; |
4159 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); |
4160 | INIT_LIST_HEAD(list: &sw_context->res_relocations); |
4161 | INIT_LIST_HEAD(list: &sw_context->bo_relocations); |
4162 | |
4163 | if (sw_context->staged_bindings) |
4164 | vmw_binding_state_reset(cbs: sw_context->staged_bindings); |
4165 | |
4166 | INIT_LIST_HEAD(list: &sw_context->staged_cmd_res); |
4167 | sw_context->ctx = &val_ctx; |
4168 | ret = vmw_execbuf_tie_context(dev_priv, sw_context, handle: dx_context_handle); |
4169 | if (unlikely(ret != 0)) |
4170 | goto out_err_nores; |
4171 | |
4172 | ret = vmw_cmd_check_all(dev_priv, sw_context, buf: kernel_commands, |
4173 | size: command_size); |
4174 | if (unlikely(ret != 0)) |
4175 | goto out_err_nores; |
4176 | |
4177 | ret = vmw_resources_reserve(sw_context); |
4178 | if (unlikely(ret != 0)) |
4179 | goto out_err_nores; |
4180 | |
4181 | ret = vmw_validation_bo_reserve(ctx: &val_ctx, intr: true); |
4182 | if (unlikely(ret != 0)) |
4183 | goto out_err_nores; |
4184 | |
4185 | ret = vmw_validation_bo_validate(ctx: &val_ctx, intr: true); |
4186 | if (unlikely(ret != 0)) |
4187 | goto out_err; |
4188 | |
4189 | ret = vmw_validation_res_validate(ctx: &val_ctx, intr: true); |
4190 | if (unlikely(ret != 0)) |
4191 | goto out_err; |
4192 | |
4193 | vmw_validation_drop_ht(ctx: &val_ctx); |
4194 | |
4195 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); |
4196 | if (unlikely(ret != 0)) { |
4197 | ret = -ERESTARTSYS; |
4198 | goto out_err; |
4199 | } |
4200 | |
4201 | if (dev_priv->has_mob) { |
4202 | ret = vmw_rebind_contexts(sw_context); |
4203 | if (unlikely(ret != 0)) |
4204 | goto out_unlock_binding; |
4205 | } |
4206 | |
4207 | if (!header) { |
4208 | ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands, |
4209 | command_size, sw_context); |
4210 | } else { |
4211 | ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size, |
4212 | sw_context); |
4213 | header = NULL; |
4214 | } |
4215 | mutex_unlock(lock: &dev_priv->binding_mutex); |
4216 | if (ret) |
4217 | goto out_err; |
4218 | |
4219 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
4220 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, p_fence: &fence, |
4221 | p_handle: (user_fence_rep) ? &handle : NULL); |
4222 | /* |
4223 | * This error is harmless, because if fence submission fails, |
4224 | * vmw_fifo_send_fence will sync. The error will be propagated to |
4225 | * user-space in @fence_rep |
4226 | */ |
4227 | if (ret != 0) |
4228 | VMW_DEBUG_USER("Fence submission error. Syncing.\n" ); |
4229 | |
4230 | vmw_execbuf_bindings_commit(sw_context, backoff: false); |
4231 | vmw_bind_dx_query_mob(sw_context); |
4232 | vmw_validation_res_unreserve(ctx: &val_ctx, backoff: false); |
4233 | |
4234 | vmw_validation_bo_fence(ctx: sw_context->ctx, fence); |
4235 | |
4236 | if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) |
4237 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); |
4238 | |
4239 | /* |
4240 | * If anything fails here, give up trying to export the fence and do a |
4241 | * sync since the user mode will not be able to sync the fence itself. |
4242 | * This ensures we are still functionally correct. |
4243 | */ |
4244 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { |
4245 | |
4246 | sync_file = sync_file_create(fence: &fence->base); |
4247 | if (!sync_file) { |
4248 | VMW_DEBUG_USER("Sync file create failed for fence\n" ); |
4249 | put_unused_fd(fd: out_fence_fd); |
4250 | out_fence_fd = -1; |
4251 | |
4252 | (void) vmw_fence_obj_wait(fence, lazy: false, interruptible: false, |
4253 | VMW_FENCE_WAIT_TIMEOUT); |
4254 | } |
4255 | } |
4256 | |
4257 | ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fp: vmw_fpriv(file_priv), ret, |
4258 | user_fence_rep, fence, fence_handle: handle, out_fence_fd); |
4259 | |
4260 | if (sync_file) { |
4261 | if (ret) { |
4262 | /* usercopy of fence failed, put the file object */ |
4263 | fput(sync_file->file); |
4264 | put_unused_fd(fd: out_fence_fd); |
4265 | } else { |
4266 | /* Link the fence with the FD created earlier */ |
4267 | fd_install(fd: out_fence_fd, file: sync_file->file); |
4268 | } |
4269 | } |
4270 | |
4271 | /* Don't unreference when handing fence out */ |
4272 | if (unlikely(out_fence != NULL)) { |
4273 | *out_fence = fence; |
4274 | fence = NULL; |
4275 | } else if (likely(fence != NULL)) { |
4276 | vmw_fence_obj_unreference(fence_p: &fence); |
4277 | } |
4278 | |
4279 | vmw_cmdbuf_res_commit(list: &sw_context->staged_cmd_res); |
4280 | mutex_unlock(lock: &dev_priv->cmdbuf_mutex); |
4281 | |
4282 | /* |
4283 | * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks |
4284 | * in resource destruction paths. |
4285 | */ |
4286 | vmw_validation_unref_lists(ctx: &val_ctx); |
4287 | |
4288 | return ret; |
4289 | |
4290 | out_unlock_binding: |
4291 | mutex_unlock(lock: &dev_priv->binding_mutex); |
4292 | out_err: |
4293 | vmw_validation_bo_backoff(ctx: &val_ctx); |
4294 | out_err_nores: |
4295 | vmw_execbuf_bindings_commit(sw_context, backoff: true); |
4296 | vmw_validation_res_unreserve(ctx: &val_ctx, backoff: true); |
4297 | vmw_resource_relocations_free(list: &sw_context->res_relocations); |
4298 | vmw_free_relocations(sw_context); |
4299 | if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) |
4300 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
4301 | out_unlock: |
4302 | vmw_cmdbuf_res_revert(list: &sw_context->staged_cmd_res); |
4303 | vmw_validation_drop_ht(ctx: &val_ctx); |
4304 | WARN_ON(!list_empty(&sw_context->ctx_list)); |
4305 | mutex_unlock(lock: &dev_priv->cmdbuf_mutex); |
4306 | |
4307 | /* |
4308 | * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks |
4309 | * in resource destruction paths. |
4310 | */ |
4311 | vmw_validation_unref_lists(ctx: &val_ctx); |
4312 | : |
4313 | if (header) |
4314 | vmw_cmdbuf_header_free(header); |
4315 | out_free_fence_fd: |
4316 | if (out_fence_fd >= 0) |
4317 | put_unused_fd(fd: out_fence_fd); |
4318 | |
4319 | return ret; |
4320 | } |
4321 | |
4322 | /** |
4323 | * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. |
4324 | * |
4325 | * @dev_priv: The device private structure. |
4326 | * |
4327 | * This function is called to idle the fifo and unpin the query buffer if the |
4328 | * normal way to do this hits an error, which should typically be extremely |
4329 | * rare. |
4330 | */ |
4331 | static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) |
4332 | { |
4333 | VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n" ); |
4334 | |
4335 | (void) vmw_fallback_wait(dev_priv, lazy: false, fifo_idle: true, seqno: 0, interruptible: false, timeout: 10*HZ); |
4336 | vmw_bo_pin_reserved(bo: dev_priv->pinned_bo, pin: false); |
4337 | if (dev_priv->dummy_query_bo_pinned) { |
4338 | vmw_bo_pin_reserved(bo: dev_priv->dummy_query_bo, pin: false); |
4339 | dev_priv->dummy_query_bo_pinned = false; |
4340 | } |
4341 | } |
4342 | |
4343 | |
4344 | /** |
4345 | * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query |
4346 | * bo. |
4347 | * |
4348 | * @dev_priv: The device private structure. |
4349 | * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a |
4350 | * query barrier that flushes all queries touching the current buffer pointed to |
4351 | * by @dev_priv->pinned_bo |
4352 | * |
4353 | * This function should be used to unpin the pinned query bo, or as a query |
4354 | * barrier when we need to make sure that all queries have finished before the |
4355 | * next fifo command. (For example on hardware context destructions where the |
4356 | * hardware may otherwise leak unfinished queries). |
4357 | * |
4358 | * This function does not return any failure codes, but make attempts to do safe |
4359 | * unpinning in case of errors. |
4360 | * |
4361 | * The function will synchronize on the previous query barrier, and will thus |
4362 | * not finish until that barrier has executed. |
4363 | * |
4364 | * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before |
4365 | * calling this function. |
4366 | */ |
4367 | void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, |
4368 | struct vmw_fence_obj *fence) |
4369 | { |
4370 | int ret = 0; |
4371 | struct vmw_fence_obj *lfence = NULL; |
4372 | DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); |
4373 | |
4374 | if (dev_priv->pinned_bo == NULL) |
4375 | goto out_unlock; |
4376 | |
	vmw_bo_placement_set(dev_priv->pinned_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
	if (ret)
		goto out_no_reserve;

	vmw_bo_placement_set(dev_priv->dummy_query_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
4428 | } |
4429 | |
4430 | /** |
4431 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo. |
4432 | * |
4433 | * @dev_priv: The device private structure. |
4434 | * |
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destruction, where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
4442 | * |
4443 | * The function will synchronize on the previous query barrier, and will thus |
4444 | * not finish until that barrier has executed. |
4445 | */ |
4446 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) |
4447 | { |
4448 | mutex_lock(&dev_priv->cmdbuf_mutex); |
4449 | if (dev_priv->query_cid_valid) |
4450 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
	mutex_unlock(&dev_priv->cmdbuf_mutex);
4452 | } |
4453 | |
4454 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, |
4455 | struct drm_file *file_priv) |
4456 | { |
4457 | struct vmw_private *dev_priv = vmw_priv(dev); |
4458 | struct drm_vmw_execbuf_arg *arg = data; |
4459 | int ret; |
4460 | struct dma_fence *in_fence = NULL; |
4461 | |
4462 | MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF); |
4463 | MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF); |
4464 | |
4465 | /* |
4466 | * Extend the ioctl argument while maintaining backwards compatibility: |
4467 | * We take different code paths depending on the value of arg->version. |
4468 | * |
4469 | * Note: The ioctl argument is extended and zeropadded by core DRM. |
4470 | */ |
4471 | if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION || |
4472 | arg->version == 0)) { |
4473 | VMW_DEBUG_USER("Incorrect execbuf version.\n" ); |
4474 | ret = -EINVAL; |
4475 | goto mksstats_out; |
4476 | } |
4477 | |
4478 | switch (arg->version) { |
4479 | case 1: |
		/* For v1 core DRM has extended + zeropadded the data */
4481 | arg->context_handle = (uint32_t) -1; |
4482 | break; |
4483 | case 2: |
4484 | default: |
4485 | /* For v2 and later core DRM would have correctly copied it */ |
4486 | break; |
4487 | } |
4488 | |
4489 | /* If imported a fence FD from elsewhere, then wait on it */ |
4490 | if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) { |
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
4500 | if (ret) |
4501 | goto out; |
4502 | } |
4503 | |
	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);
4510 | |
4511 | if (unlikely(ret != 0)) |
4512 | goto out; |
4513 | |
4514 | vmw_kms_cursor_post_execbuf(dev_priv); |
4515 | |
4516 | out: |
4517 | if (in_fence) |
		dma_fence_put(in_fence);
4519 | |
4520 | mksstats_out: |
4521 | MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF); |
4522 | return ret; |
4523 | } |
4524 | |