// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

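/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: The TTM base object making the resource visible to user-space.
 * @res: The embedded struct vmw_resource.
 * @cbs: Context binding state tracker.
 * @man: Command buffer managed resource manager for this context.
 * @cotables: Cotable resources, one per SVGACOTableType; used by DX
 * contexts only.
 * @cotable_lock: Protects @cotables against concurrent lookup and teardown.
 * @dx_query_mob: Buffer object holding the DX query results for this context.
 */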
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_MAX];
        spinlock_t cotable_lock;
        struct vmw_bo *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

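/*
 * user_context_converter is handed to the driver's generic user resource
 * lookup helpers so that a user-space context handle can be translated
 * into a struct vmw_resource pointer. A minimal sketch of such a lookup
 * (error handling omitted), assuming the vmw_user_resource_lookup_handle()
 * helper from vmwgfx_resource.c:
 *
 *      struct vmw_resource *res;
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            user_context_converter, &res);
 *
 * Three resource function tables follow, one per context flavor: legacy
 * contexts keep all state on the device side and are never created
 * lazily, bound or evicted, while guest-backed (GB) and DX contexts are
 * backed by a mob and can be evicted, read back and rebound on demand.
 */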
static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_guest_memory = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .domain = VMW_BO_DOMAIN_SYS,
        .busy_domain = VMW_BO_DOMAIN_SYS,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_guest_memory = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "guest backed contexts",
        .domain = VMW_BO_DOMAIN_MOB,
        .busy_domain = VMW_BO_DOMAIN_MOB,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_guest_memory = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "dx contexts",
        .domain = VMW_BO_DOMAIN_MOB,
        .busy_domain = VMW_BO_DOMAIN_MOB,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

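/**
 * vmw_context_cotables_unref - Drop a context's references on its cotables.
 *
 * @dev_priv: Pointer to the device private structure.
 * @uctx: Pointer to the user context.
 *
 * Each cotable pointer is cleared under the cotable spinlock and the
 * reference is dropped outside of it, so that cotable teardown never
 * races with a concurrent lookup.
 */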
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
                                       struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;
        u32 cotable_max = has_sm5_context(dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        for (i = 0; i < cotable_max; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}

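/**
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, this kills all bindings, destroys the
 * command buffer resource manager, lets the resource function table
 * destroy the device context and finally drops the cotable references.
 * For legacy contexts, a SVGA_3D_CMD_CONTEXT_DESTROY command is emitted
 * directly.
 */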
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(dev_priv, uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return;

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}

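/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to set up the resource as a DX context.
 * @res: Pointer to the (embedded) resource to initialize.
 * @res_free: Destructor to call on error, or NULL to kfree() @res.
 *
 * Sets up the guest memory size, the binding state tracker, the command
 * buffer resource manager and, for DX contexts, the cotables.
 */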
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
                                  sizeof(SVGAGBContextData));
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                u32 cotable_max = has_sm5_context(dev_priv) ?
                        SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
                for (i = 0; i < cotable_max; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (IS_ERR(uctx->cotables[i])) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_cotables:
        vmw_context_cotables_unref(dev_priv, uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

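/**
 * vmw_context_init - Initialize a context resource of the appropriate flavor.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the (embedded) resource to initialize.
 * @res_free: Destructor to call on error, or NULL to kfree() @res.
 * @dx: Whether to set up the resource as a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices. Legacy
 * devices have no deferred create/bind step, so the context is defined
 * here directly with SVGA_3D_CMD_CONTEXT_DEFINE.
 */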
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

/*
 * GB context.
 */

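/**
 * vmw_gb_context_create - Allocate a device id and define the GB context.
 *
 * @res: Pointer to the context resource.
 *
 * Called on first validation; a res->id of -1 means the device context
 * has not been created yet.
 */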
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

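/**
 * vmw_gb_context_bind - Bind the context to its backing mob.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backing mob.
 *
 * validContents tells the device whether the mob still holds a usable
 * context image from an earlier readback.
 */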
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->resource->start;
        cmd->body.validContents = res->guest_memory_dirty;
        res->guest_memory_dirty = false;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

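/**
 * vmw_gb_context_unbind - Unbind the context from its backing mob.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context image back into the mob first.
 * @val_buf: Validation buffer holding the reserved backing mob.
 *
 * Scrubs all bindings, optionally reads the context image back, binds
 * the context to SVGA3D_INVALID_ID and finally fences the mob so it is
 * not reused before the device is done with it.
 */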
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_cmd_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

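/**
 * vmw_gb_context_destroy - Destroy the device GB context and release its id.
 *
 * @res: Pointer to the context resource.
 */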
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

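/**
 * vmw_dx_context_create - Allocate a device id and define the DX context.
 *
 * @res: Pointer to the context resource.
 *
 * Called on first validation; a res->id of -1 means the device context
 * has not been created yet.
 */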
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

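/**
 * vmw_dx_context_bind - Bind a DX context to its backing mob.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backing mob.
 */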
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->resource->start;
        cmd->body.validContents = res->guest_memory_dirty;
        res->guest_memory_dirty = false;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer to be reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed
 * first so that they don't have to be scrubbed later with an invalid
 * context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < cotable_max; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}

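/**
 * vmw_dx_context_unbind - Unbind a DX context from its backing mob.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context image and query states back first.
 * @val_buf: Validation buffer holding the reserved backing mob.
 *
 * Like the GB variant, but also scrubs the cotables and, on readback,
 * saves pending query results through the context's query mob.
 */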
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_cmd_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

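/**
 * vmw_dx_context_destroy - Destroy the device DX context and release its id.
 *
 * @res: Pointer to the context resource.
 */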
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

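/**
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * @res: Pointer to the embedded context resource.
 *
 * Frees the binding state, drops any DX query mob association and
 * finally frees the enclosing struct vmw_user_context together with
 * its base object.
 */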
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
                container_of(res, struct vmw_user_context, res);

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
                container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

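/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user-space context handle.
 *
 * @dev: Pointer to the drm device.
 * @data: The drm_vmw_context_arg identifying the context.
 * @file_priv: Identifies the calling file.
 *
 * Drops the base object reference; the context resource itself goes away
 * when its last reference is released.
 */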
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid);
}

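/**
 * vmw_context_define - Create a context resource and its user-space handle.
 *
 * @dev: Pointer to the drm device.
 * @data: The drm_vmw_context_arg through which the context id is returned.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Allocates a struct vmw_user_context, initializes the embedded resource
 * and registers a TTM base object so the context becomes visible to
 * user-space through the handle returned in @data.
 */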
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (!has_sm4_context(dev_priv) && dx) {
                VMW_DEBUG_USER("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ret = -ENOMEM;
                goto out_ret;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release);
        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.handle;
out_err:
        vmw_resource_unreference(&res);
out_ret:
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

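/**
 * vmw_extended_context_define_ioctl - Define a legacy or a DX context.
 *
 * @dev: Pointer to the drm device.
 * @data: The union drm_vmw_extended_context_arg selecting the flavor.
 * @file_priv: Identifies the calling file.
 */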
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

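/**
 * vmw_context_res_man - Return the context's command buffer managed
 * resource manager.
 *
 * @ctx: The context resource.
 */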
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

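/**
 * vmw_context_cotable - Look up a cotable resource of a context.
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type to look up.
 *
 * Returns a non-refcounted pointer, or ERR_PTR(-EINVAL) if @cotable_type
 * is out of range for the device's SM level.
 */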
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        if (cotable_type >= cotable_max)
                return ERR_PTR(-EINVAL);

        return container_of(ctx, struct vmw_user_context, res)->
                cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets the query MOB for the context. If @mob is NULL, this function
 * removes the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_bo *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries. */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_bo_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Return a non-counted reference to the
 * DX query MOB of the context, or NULL if none is bound.
 *
 * @ctx_res: The context resource
 */
struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}