1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2011-2012 Intel Corporation |
5 | */ |
6 | |
7 | /* |
8 | * This file implements HW context support. On gen5+ a HW context consists of an |
9 | * opaque GPU object which is referenced at times of context saves and restores. |
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
12 | * something like a context does exist for the media ring, the code only |
13 | * supports contexts for the render ring. |
14 | * |
15 | * In software, there is a distinction between contexts created by the user, |
16 | * and the default HW context. The default HW context is used by GPU clients |
17 | * that do not request setup of their own hardware context. The default |
18 | * context's state is never restored to help prevent programming errors. This |
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care about. In fact, the
22 | * code could likely be constructed, albeit in a more complicated fashion, to |
23 | * never use the default context, though that limits the driver's ability to |
24 | * swap out, and/or destroy other contexts. |
25 | * |
26 | * All other contexts are created as a request by the GPU client. These contexts |
27 | * store GPU state, and thus allow GPU clients to not re-emit state (and |
28 | * potentially query certain state) at any time. The kernel driver makes |
29 | * certain that the appropriate commands are inserted. |
30 | * |
31 | * The context life cycle is semi-complicated in that context BOs may live |
32 | * longer than the context itself because of the way the hardware, and object |
33 | * tracking works. Below is a very crude representation of the state machine |
34 | * describing the context life. |
35 | * refcount pincount active |
36 | * S0: initial state 0 0 0 |
37 | * S1: context created 1 0 0 |
38 | * S2: context is currently running 2 1 X |
39 | * S3: GPU referenced, but not current 2 0 1 |
40 | * S4: context is current, but destroyed 1 1 0 |
41 | * S5: like S3, but destroyed 1 0 1 |
42 | * |
43 | * The most common (but not all) transitions: |
44 | * S0->S1: client creates a context |
45 | * S1->S2: client submits execbuf with context |
 * S2->S3: another client submits an execbuf with its own context
47 | * S3->S1: context object was retired |
 * S3->S2: the client submits another execbuf
49 | * S2->S4: context destroy called with current context |
50 | * S3->S5->S0: destroy path |
51 | * S4->S5->S0: destroy path on current context |
52 | * |
53 | * There are two confusing terms used above: |
54 | * The "current context" means the context which is currently running on the |
55 | * GPU. The GPU has loaded its state already and has stored away the gtt |
56 | * offset of the BO. The GPU is not actively referencing the data at this |
57 | * offset, but it will on the next context switch. The only way to avoid this |
58 | * is to do a GPU reset. |
59 | * |
 * An "active context" is one which was previously the "current context" and is
61 | * on the active list waiting for the next context switch to occur. Until this |
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context while it is still active.
64 | * |
65 | */ |
66 | |
67 | #include <linux/highmem.h> |
68 | #include <linux/log2.h> |
69 | #include <linux/nospec.h> |
70 | |
71 | #include <drm/drm_cache.h> |
72 | #include <drm/drm_syncobj.h> |
73 | |
74 | #include "gt/gen6_ppgtt.h" |
75 | #include "gt/intel_context.h" |
76 | #include "gt/intel_context_param.h" |
77 | #include "gt/intel_engine_heartbeat.h" |
78 | #include "gt/intel_engine_user.h" |
79 | #include "gt/intel_gpu_commands.h" |
80 | #include "gt/intel_ring.h" |
81 | |
82 | #include "pxp/intel_pxp.h" |
83 | |
84 | #include "i915_file_private.h" |
85 | #include "i915_gem_context.h" |
86 | #include "i915_trace.h" |
87 | #include "i915_user_extensions.h" |
88 | |
89 | #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 |
90 | |
91 | static struct kmem_cache *slab_luts; |
92 | |
93 | struct i915_lut_handle *i915_lut_handle_alloc(void) |
94 | { |
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
96 | } |
97 | |
98 | void i915_lut_handle_free(struct i915_lut_handle *lut) |
99 | { |
	return kmem_cache_free(slab_luts, lut);
101 | } |
102 | |
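/*
 * Close all handle->vma lookup entries for a context: for every VMA still in
 * the context's handles_vma radix tree, unlink the matching LUT entry from
 * its object and close the VMA. Called on context close, before the ppgtt
 * itself is torn down (see the note in context_close()).
 */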
103 | static void lut_close(struct i915_gem_context *ctx) |
104 | { |
105 | struct radix_tree_iter iter; |
106 | void __rcu **slot; |
107 | |
108 | mutex_lock(&ctx->lut_mutex); |
109 | rcu_read_lock(); |
110 | radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { |
111 | struct i915_vma *vma = rcu_dereference_raw(*slot); |
112 | struct drm_i915_gem_object *obj = vma->obj; |
113 | struct i915_lut_handle *lut; |
114 | |
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
119 | list_for_each_entry(lut, &obj->lut_list, obj_link) { |
120 | if (lut->ctx != ctx) |
121 | continue; |
122 | |
123 | if (lut->handle != iter.index) |
124 | continue; |
125 | |
			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);
130 | |
131 | if (&lut->obj_link != &obj->lut_list) { |
132 | i915_lut_handle_free(lut); |
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
134 | i915_vma_close(vma); |
135 | i915_gem_object_put(obj); |
136 | } |
137 | |
138 | i915_gem_object_put(obj); |
139 | } |
140 | rcu_read_unlock(); |
	mutex_unlock(&ctx->lut_mutex);
142 | } |
143 | |
144 | static struct intel_context * |
145 | lookup_user_engine(struct i915_gem_context *ctx, |
146 | unsigned long flags, |
147 | const struct i915_engine_class_instance *ci) |
148 | #define LOOKUP_USER_INDEX BIT(0) |
149 | { |
150 | int idx; |
151 | |
152 | if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) |
		return ERR_PTR(-EINVAL);
154 | |
155 | if (!i915_gem_context_user_engines(ctx)) { |
156 | struct intel_engine_cs *engine; |
157 | |
		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);
163 | |
164 | idx = engine->legacy_idx; |
165 | } else { |
166 | idx = ci->engine_instance; |
167 | } |
168 | |
169 | return i915_gem_context_get_engine(ctx, idx); |
170 | } |
171 | |
172 | static int validate_priority(struct drm_i915_private *i915, |
173 | const struct drm_i915_gem_context_param *args) |
174 | { |
175 | s64 priority = args->value; |
176 | |
177 | if (args->size) |
178 | return -EINVAL; |
179 | |
180 | if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) |
181 | return -ENODEV; |
182 | |
183 | if (priority > I915_CONTEXT_MAX_USER_PRIORITY || |
184 | priority < I915_CONTEXT_MIN_USER_PRIORITY) |
185 | return -EINVAL; |
186 | |
187 | if (priority > I915_CONTEXT_DEFAULT_PRIORITY && |
188 | !capable(CAP_SYS_NICE)) |
189 | return -EPERM; |
190 | |
191 | return 0; |
192 | } |
193 | |
194 | static void proto_context_close(struct drm_i915_private *i915, |
195 | struct i915_gem_proto_context *pc) |
196 | { |
197 | int i; |
198 | |
199 | if (pc->pxp_wakeref) |
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
209 | } |
210 | |
211 | static int proto_context_set_persistence(struct drm_i915_private *i915, |
212 | struct i915_gem_proto_context *pc, |
213 | bool persist) |
214 | { |
215 | if (persist) { |
216 | /* |
217 | * Only contexts that are short-lived [that will expire or be |
218 | * reset] are allowed to survive past termination. We require |
219 | * hangcheck to ensure that the persistent requests are healthy. |
220 | */ |
221 | if (!i915->params.enable_hangcheck) |
222 | return -EINVAL; |
223 | |
224 | pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); |
225 | } else { |
226 | /* To cancel a context we use "preempt-to-idle" */ |
227 | if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) |
228 | return -ENODEV; |
229 | |
230 | /* |
231 | * If the cancel fails, we then need to reset, cleanly! |
232 | * |
233 | * If the per-engine reset fails, all hope is lost! We resort |
234 | * to a full GPU reset in that unlikely case, but realistically |
235 | * if the engine could not reset, the full reset does not fare |
236 | * much better. The damage has been done. |
237 | * |
238 | * However, if we cannot reset an engine by itself, we cannot |
239 | * cleanup a hanging persistent context without causing |
		 * collateral damage, and we should not pretend we can by
241 | * exposing the interface. |
242 | */ |
		if (!intel_has_reset_engine(to_gt(i915)))
244 | return -ENODEV; |
245 | |
246 | pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); |
247 | } |
248 | |
249 | return 0; |
250 | } |
251 | |
252 | static int proto_context_set_protected(struct drm_i915_private *i915, |
253 | struct i915_gem_proto_context *pc, |
254 | bool protected) |
255 | { |
256 | int ret = 0; |
257 | |
258 | if (!protected) { |
259 | pc->uses_protected_content = false; |
	} else if (!intel_pxp_is_enabled(i915->pxp)) {
261 | ret = -ENODEV; |
262 | } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || |
263 | !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { |
264 | ret = -EPERM; |
265 | } else { |
266 | pc->uses_protected_content = true; |
267 | |
268 | /* |
269 | * protected context usage requires the PXP session to be up, |
270 | * which in turn requires the device to be active. |
271 | */ |
		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		if (!intel_pxp_is_active(i915->pxp))
			ret = intel_pxp_start(i915->pxp);
276 | } |
277 | |
278 | return ret; |
279 | } |
280 | |
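/*
 * A proto-context gathers all of the creation-time parameters of a GEM
 * context (flags, engines, VM, SSEU, protected-content state, ...) before
 * the real context is finalized by i915_gem_create_context(). This lets
 * CONTEXT_CREATE extensions and early SETPARAM calls be validated up front,
 * without taking any of the finalized context's locks.
 */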
281 | static struct i915_gem_proto_context * |
282 | proto_context_create(struct drm_i915_file_private *fpriv, |
283 | struct drm_i915_private *i915, unsigned int flags) |
284 | { |
285 | struct i915_gem_proto_context *pc, *err; |
286 | |
	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return ERR_PTR(-ENOMEM);
290 | |
291 | pc->fpriv = fpriv; |
292 | pc->num_user_engines = -1; |
293 | pc->user_engines = NULL; |
294 | pc->user_flags = BIT(UCONTEXT_BANNABLE) | |
295 | BIT(UCONTEXT_RECOVERABLE); |
296 | if (i915->params.enable_hangcheck) |
297 | pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); |
298 | pc->sched.priority = I915_PRIORITY_NORMAL; |
299 | |
300 | if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { |
301 | if (!HAS_EXECLISTS(i915)) { |
			err = ERR_PTR(-EINVAL);
303 | goto proto_close; |
304 | } |
305 | pc->single_timeline = true; |
306 | } |
307 | |
308 | return pc; |
309 | |
310 | proto_close: |
311 | proto_context_close(i915, pc); |
312 | return err; |
313 | } |
314 | |
315 | static int proto_context_register_locked(struct drm_i915_file_private *fpriv, |
316 | struct i915_gem_proto_context *pc, |
317 | u32 *id) |
318 | { |
319 | int ret; |
320 | void *old; |
321 | |
322 | lockdep_assert_held(&fpriv->proto_context_lock); |
323 | |
	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
	if (xa_is_err(old)) {
		xa_erase(&fpriv->context_xa, *id);
		return xa_err(old);
332 | } |
333 | WARN_ON(old); |
334 | |
335 | return 0; |
336 | } |
337 | |
338 | static int proto_context_register(struct drm_i915_file_private *fpriv, |
339 | struct i915_gem_proto_context *pc, |
340 | u32 *id) |
341 | { |
342 | int ret; |
343 | |
344 | mutex_lock(&fpriv->proto_context_lock); |
345 | ret = proto_context_register_locked(fpriv, pc, id); |
	mutex_unlock(&fpriv->proto_context_lock);
347 | |
348 | return ret; |
349 | } |
350 | |
351 | static struct i915_address_space * |
352 | i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) |
353 | { |
354 | struct i915_address_space *vm; |
355 | |
356 | xa_lock(&file_priv->vm_xa); |
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
360 | xa_unlock(&file_priv->vm_xa); |
361 | |
362 | return vm; |
363 | } |
364 | |
365 | static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv, |
366 | struct i915_gem_proto_context *pc, |
367 | const struct drm_i915_gem_context_param *args) |
368 | { |
369 | struct drm_i915_private *i915 = fpriv->i915; |
370 | struct i915_address_space *vm; |
371 | |
372 | if (args->size) |
373 | return -EINVAL; |
374 | |
375 | if (!HAS_FULL_PPGTT(i915)) |
376 | return -ENODEV; |
377 | |
378 | if (upper_32_bits(args->value)) |
379 | return -ENOENT; |
380 | |
	vm = i915_gem_vm_lookup(fpriv, args->value);
	if (!vm)
		return -ENOENT;

	if (pc->vm)
		i915_vm_put(pc->vm);
387 | pc->vm = vm; |
388 | |
389 | return 0; |
390 | } |
391 | |
392 | struct set_proto_ctx_engines { |
393 | struct drm_i915_private *i915; |
394 | unsigned num_engines; |
395 | struct i915_gem_proto_engine *engines; |
396 | }; |
397 | |
398 | static int |
399 | set_proto_ctx_engines_balance(struct i915_user_extension __user *base, |
400 | void *data) |
401 | { |
402 | struct i915_context_engines_load_balance __user *ext = |
403 | container_of_user(base, typeof(*ext), base); |
404 | const struct set_proto_ctx_engines *set = data; |
405 | struct drm_i915_private *i915 = set->i915; |
406 | struct intel_engine_cs **siblings; |
407 | u16 num_siblings, idx; |
408 | unsigned int n; |
409 | int err; |
410 | |
411 | if (!HAS_EXECLISTS(i915)) |
412 | return -ENODEV; |
413 | |
414 | if (get_user(idx, &ext->engine_index)) |
415 | return -EFAULT; |
416 | |
417 | if (idx >= set->num_engines) { |
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
427 | return -EEXIST; |
428 | } |
429 | |
430 | if (get_user(num_siblings, &ext->num_siblings)) |
431 | return -EFAULT; |
432 | |
433 | err = check_user_mbz(&ext->flags); |
434 | if (err) |
435 | return err; |
436 | |
437 | err = check_user_mbz(&ext->mbz64); |
438 | if (err) |
439 | return err; |
440 | |
441 | if (num_siblings == 0) |
442 | return 0; |
443 | |
	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
462 | n, ci.engine_class, ci.engine_instance); |
463 | err = -EINVAL; |
464 | goto err_siblings; |
465 | } |
466 | } |
467 | |
468 | if (num_siblings == 1) { |
469 | set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL; |
470 | set->engines[idx].engine = siblings[0]; |
		kfree(siblings);
472 | } else { |
473 | set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED; |
474 | set->engines[idx].num_siblings = num_siblings; |
475 | set->engines[idx].siblings = siblings; |
476 | } |
477 | |
478 | return 0; |
479 | |
480 | err_siblings: |
	kfree(siblings);
482 | |
483 | return err; |
484 | } |
485 | |
486 | static int |
487 | set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) |
488 | { |
489 | struct i915_context_engines_bond __user *ext = |
490 | container_of_user(base, typeof(*ext), base); |
491 | const struct set_proto_ctx_engines *set = data; |
492 | struct drm_i915_private *i915 = set->i915; |
493 | struct i915_engine_class_instance ci; |
494 | struct intel_engine_cs *master; |
495 | u16 idx, num_bonds; |
496 | int err, n; |
497 | |
498 | if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && |
499 | !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { |
		drm_dbg(&i915->drm,
			"Bonding not supported on this platform\n");
502 | return -ENODEV; |
503 | } |
504 | |
505 | if (get_user(idx, &ext->virtual_index)) |
506 | return -EFAULT; |
507 | |
508 | if (idx >= set->num_engines) { |
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
511 | idx, set->num_engines); |
512 | return -EINVAL; |
513 | } |
514 | |
515 | idx = array_index_nospec(idx, set->num_engines); |
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
524 | return -EINVAL; |
525 | } |
526 | |
527 | err = check_user_mbz(&ext->flags); |
528 | if (err) |
529 | return err; |
530 | |
531 | for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { |
532 | err = check_user_mbz(&ext->mbz64[n]); |
533 | if (err) |
534 | return err; |
535 | } |
536 | |
	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
538 | return -EFAULT; |
539 | |
	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
552 | return -ENODEV; |
553 | } |
554 | |
555 | if (get_user(num_bonds, &ext->num_bonds)) |
556 | return -EFAULT; |
557 | |
558 | for (n = 0; n < num_bonds; n++) { |
559 | struct intel_engine_cs *bond; |
560 | |
		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
570 | n, ci.engine_class, ci.engine_instance); |
571 | return -EINVAL; |
572 | } |
573 | } |
574 | |
575 | return 0; |
576 | } |
577 | |
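/*
 * The parallel-submit extension carries a two-dimensional array of engines:
 * 'width' slots that will run in parallel, each with 'num_siblings' candidate
 * physical engines, flattened so that slot i, sibling j lives at
 * engines[i * num_siblings + j]. The checks below enforce matching engine
 * classes and contiguous logical masks across the slots.
 */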
578 | static int |
579 | set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, |
580 | void *data) |
581 | { |
582 | struct i915_context_engines_parallel_submit __user *ext = |
583 | container_of_user(base, typeof(*ext), base); |
584 | const struct set_proto_ctx_engines *set = data; |
585 | struct drm_i915_private *i915 = set->i915; |
586 | struct i915_engine_class_instance prev_engine; |
587 | u64 flags; |
588 | int err = 0, n, i, j; |
589 | u16 slot, width, num_siblings; |
590 | struct intel_engine_cs **siblings = NULL; |
591 | intel_engine_mask_t prev_mask; |
592 | |
593 | if (get_user(slot, &ext->engine_index)) |
594 | return -EFAULT; |
595 | |
596 | if (get_user(width, &ext->width)) |
597 | return -EFAULT; |
598 | |
599 | if (get_user(num_siblings, &ext->num_siblings)) |
600 | return -EFAULT; |
601 | |
	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
	    num_siblings != 1) {
		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
605 | num_siblings); |
606 | return -EINVAL; |
607 | } |
608 | |
	if (slot >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			slot, set->num_engines);
		return -EINVAL;
	}

	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", slot);
618 | return -EINVAL; |
619 | } |
620 | |
621 | if (get_user(flags, &ext->flags)) |
622 | return -EFAULT; |
623 | |
624 | if (flags) { |
		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
626 | return -EINVAL; |
627 | } |
628 | |
629 | for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { |
630 | err = check_user_mbz(&ext->mbz64[n]); |
631 | if (err) |
632 | return err; |
633 | } |
634 | |
635 | if (width < 2) { |
		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
		return -EINVAL;
	}

	if (num_siblings < 1) {
		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
642 | num_siblings); |
643 | return -EINVAL; |
644 | } |
645 | |
	siblings = kmalloc_array(num_siblings * width,
				 sizeof(*siblings),
				 GFP_KERNEL);
649 | if (!siblings) |
650 | return -ENOMEM; |
651 | |
652 | /* Create contexts / engines */ |
653 | for (i = 0; i < width; ++i) { |
654 | intel_engine_mask_t current_mask = 0; |
655 | |
656 | for (j = 0; j < num_siblings; ++j) { |
657 | struct i915_engine_class_instance ci; |
658 | |
659 | n = i * num_siblings + j; |
			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
				err = -EFAULT;
				goto out_err;
			}

			siblings[n] =
				intel_engine_lookup_user(i915, ci.engine_class,
							 ci.engine_instance);
			if (!siblings[n]) {
				drm_dbg(&i915->drm,
					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
671 | n, ci.engine_class, ci.engine_instance); |
672 | err = -EINVAL; |
673 | goto out_err; |
674 | } |
675 | |
676 | /* |
677 | * We don't support breadcrumb handshake on these |
678 | * classes |
679 | */ |
680 | if (siblings[n]->class == RENDER_CLASS || |
681 | siblings[n]->class == COMPUTE_CLASS) { |
682 | err = -EINVAL; |
683 | goto out_err; |
684 | } |
685 | |
686 | if (n) { |
687 | if (prev_engine.engine_class != |
688 | ci.engine_class) { |
					drm_dbg(&i915->drm,
						"Mismatched class %d, %d\n",
691 | prev_engine.engine_class, |
692 | ci.engine_class); |
693 | err = -EINVAL; |
694 | goto out_err; |
695 | } |
696 | } |
697 | |
698 | prev_engine = ci; |
699 | current_mask |= siblings[n]->logical_mask; |
700 | } |
701 | |
702 | if (i > 0) { |
703 | if (current_mask != prev_mask << 1) { |
				drm_dbg(&i915->drm,
					"Non contiguous logical mask 0x%x, 0x%x\n",
706 | prev_mask, current_mask); |
707 | err = -EINVAL; |
708 | goto out_err; |
709 | } |
710 | } |
711 | prev_mask = current_mask; |
712 | } |
713 | |
714 | set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; |
715 | set->engines[slot].num_siblings = num_siblings; |
716 | set->engines[slot].width = width; |
717 | set->engines[slot].siblings = siblings; |
718 | |
719 | return 0; |
720 | |
721 | out_err: |
	kfree(siblings);
723 | |
724 | return err; |
725 | } |
726 | |
727 | static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { |
728 | [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, |
729 | [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, |
730 | [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = |
731 | set_proto_ctx_engines_parallel_submit, |
732 | }; |
733 | |
734 | static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, |
735 | struct i915_gem_proto_context *pc, |
736 | const struct drm_i915_gem_context_param *args) |
737 | { |
738 | struct drm_i915_private *i915 = fpriv->i915; |
739 | struct set_proto_ctx_engines set = { .i915 = i915 }; |
740 | struct i915_context_param_engines __user *user = |
741 | u64_to_user_ptr(args->value); |
742 | unsigned int n; |
743 | u64 extensions; |
744 | int err; |
745 | |
746 | if (pc->num_user_engines >= 0) { |
		drm_dbg(&i915->drm, "Cannot set engines twice");
		return -EINVAL;
	}

	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
754 | args->size); |
755 | return -EINVAL; |
756 | } |
757 | |
758 | set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); |
759 | /* RING_MASK has no shift so we can use it directly here */ |
760 | if (set.num_engines > I915_EXEC_RING_MASK + 1) |
761 | return -EINVAL; |
762 | |
	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < set.num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			kfree(set.engines);
773 | return -EFAULT; |
774 | } |
775 | |
776 | memset(&set.engines[n], 0, sizeof(set.engines[n])); |
777 | |
778 | if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && |
779 | ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) |
780 | continue; |
781 | |
		engine = intel_engine_lookup_user(i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			kfree(set.engines);
790 | return -ENOENT; |
791 | } |
792 | |
793 | set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL; |
794 | set.engines[n].engine = engine; |
795 | } |
796 | |
797 | err = -EFAULT; |
798 | if (!get_user(extensions, &user->extensions)) |
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_proto_ctx_engines_extensions,
					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
					   &set);
	if (err) {
		kfree(set.engines);
805 | return err; |
806 | } |
807 | |
808 | pc->num_user_engines = set.num_engines; |
809 | pc->user_engines = set.engines; |
810 | |
811 | return 0; |
812 | } |
813 | |
814 | static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, |
815 | struct i915_gem_proto_context *pc, |
816 | struct drm_i915_gem_context_param *args) |
817 | { |
818 | struct drm_i915_private *i915 = fpriv->i915; |
819 | struct drm_i915_gem_context_param_sseu user_sseu; |
820 | struct intel_sseu *sseu; |
821 | int ret; |
822 | |
823 | if (args->size < sizeof(user_sseu)) |
824 | return -EINVAL; |
825 | |
826 | if (GRAPHICS_VER(i915) != 11) |
827 | return -ENODEV; |
828 | |
	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
831 | return -EFAULT; |
832 | |
833 | if (user_sseu.rsvd) |
834 | return -EINVAL; |
835 | |
836 | if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) |
837 | return -EINVAL; |
838 | |
839 | if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) |
840 | return -EINVAL; |
841 | |
842 | if (pc->num_user_engines >= 0) { |
843 | int idx = user_sseu.engine.engine_instance; |
844 | struct i915_gem_proto_engine *pe; |
845 | |
846 | if (idx >= pc->num_user_engines) |
847 | return -EINVAL; |
848 | |
849 | idx = array_index_nospec(idx, pc->num_user_engines); |
850 | pe = &pc->user_engines[idx]; |
851 | |
852 | /* Only render engine supports RPCS configuration. */ |
853 | if (pe->engine->class != RENDER_CLASS) |
854 | return -EINVAL; |
855 | |
856 | sseu = &pe->sseu; |
857 | } else { |
858 | /* Only render engine supports RPCS configuration. */ |
859 | if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER) |
860 | return -EINVAL; |
861 | |
862 | /* There is only one render engine */ |
863 | if (user_sseu.engine.engine_instance != 0) |
864 | return -EINVAL; |
865 | |
866 | sseu = &pc->legacy_rcs_sseu; |
867 | } |
868 | |
	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
870 | if (ret) |
871 | return ret; |
872 | |
873 | args->size = sizeof(user_sseu); |
874 | |
875 | return 0; |
876 | } |
877 | |
878 | static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, |
879 | struct i915_gem_proto_context *pc, |
880 | struct drm_i915_gem_context_param *args) |
881 | { |
882 | int ret = 0; |
883 | |
884 | switch (args->param) { |
885 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: |
886 | if (args->size) |
887 | ret = -EINVAL; |
888 | else if (args->value) |
889 | pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); |
890 | else |
891 | pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); |
892 | break; |
893 | |
894 | case I915_CONTEXT_PARAM_BANNABLE: |
895 | if (args->size) |
896 | ret = -EINVAL; |
897 | else if (!capable(CAP_SYS_ADMIN) && !args->value) |
898 | ret = -EPERM; |
899 | else if (args->value) |
900 | pc->user_flags |= BIT(UCONTEXT_BANNABLE); |
901 | else if (pc->uses_protected_content) |
902 | ret = -EPERM; |
903 | else |
904 | pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); |
905 | break; |
906 | |
907 | case I915_CONTEXT_PARAM_RECOVERABLE: |
908 | if (args->size) |
909 | ret = -EINVAL; |
910 | else if (!args->value) |
911 | pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); |
912 | else if (pc->uses_protected_content) |
913 | ret = -EPERM; |
914 | else |
915 | pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); |
916 | break; |
917 | |
918 | case I915_CONTEXT_PARAM_PRIORITY: |
		ret = validate_priority(fpriv->i915, args);
920 | if (!ret) |
921 | pc->sched.priority = args->value; |
922 | break; |
923 | |
924 | case I915_CONTEXT_PARAM_SSEU: |
925 | ret = set_proto_ctx_sseu(fpriv, pc, args); |
926 | break; |
927 | |
928 | case I915_CONTEXT_PARAM_VM: |
929 | ret = set_proto_ctx_vm(fpriv, pc, args); |
930 | break; |
931 | |
932 | case I915_CONTEXT_PARAM_ENGINES: |
933 | ret = set_proto_ctx_engines(fpriv, pc, args); |
934 | break; |
935 | |
936 | case I915_CONTEXT_PARAM_PERSISTENCE: |
937 | if (args->size) |
938 | ret = -EINVAL; |
939 | else |
			ret = proto_context_set_persistence(fpriv->i915, pc,
							    args->value);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = proto_context_set_protected(fpriv->i915, pc,
						  args->value);
947 | break; |
948 | |
949 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
950 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
951 | case I915_CONTEXT_PARAM_RINGSIZE: |
952 | default: |
953 | ret = -EINVAL; |
954 | break; |
955 | } |
956 | |
957 | return ret; |
958 | } |
959 | |
960 | static int intel_context_set_gem(struct intel_context *ce, |
961 | struct i915_gem_context *ctx, |
962 | struct intel_sseu sseu) |
963 | { |
964 | int ret = 0; |
965 | |
966 | GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); |
967 | RCU_INIT_POINTER(ce->gem_context, ctx); |
968 | |
969 | GEM_BUG_ON(intel_context_is_pinned(ce)); |
970 | |
971 | if (ce->engine->class == COMPUTE_CLASS) |
972 | ce->ring_size = SZ_512K; |
973 | else |
974 | ce->ring_size = SZ_16K; |
975 | |
	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
989 | } |
990 | |
991 | /* A valid SSEU has no zero fields */ |
992 | if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) |
993 | ret = intel_context_reconfigure_sseu(ce, sseu); |
994 | |
995 | return ret; |
996 | } |
997 | |
998 | static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) |
999 | { |
1000 | while (count--) { |
1001 | struct intel_context *ce = e->engines[count], *child; |
1002 | |
1003 | if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) |
1004 | continue; |
1005 | |
1006 | for_each_child(ce, child) |
			intel_context_unpin(child);
		intel_context_unpin(ce);
	}
}

static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
1015 | } |
1016 | |
1017 | static void __free_engines(struct i915_gem_engines *e, unsigned int count) |
1018 | { |
1019 | while (count--) { |
1020 | if (!e->engines[count]) |
1021 | continue; |
1022 | |
		intel_context_put(e->engines[count]);
	}
	kfree(e);
1026 | } |
1027 | |
1028 | static void free_engines(struct i915_gem_engines *e) |
1029 | { |
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
1040 | } |
1041 | |
1042 | static void accumulate_runtime(struct i915_drm_client *client, |
1043 | struct i915_gem_engines *engines) |
1044 | { |
1045 | struct i915_gem_engines_iter it; |
1046 | struct intel_context *ce; |
1047 | |
1048 | if (!client) |
1049 | return; |
1050 | |
1051 | /* Transfer accumulated runtime to the parent GEM context. */ |
1052 | for_each_gem_engine(ce, engines, it) { |
1053 | unsigned int class = ce->engine->uabi_class; |
1054 | |
1055 | GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime)); |
		atomic64_add(intel_context_get_total_runtime_ns(ce),
			     &client->past_runtime[class]);
1058 | } |
1059 | } |
1060 | |
1061 | static int |
1062 | engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) |
1063 | { |
1064 | struct i915_gem_engines *engines = |
1065 | container_of(fence, typeof(*engines), fence); |
1066 | struct i915_gem_context *ctx = engines->ctx; |
1067 | |
1068 | switch (state) { |
1069 | case FENCE_COMPLETE: |
		if (!list_empty(&engines->link)) {
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		accumulate_runtime(ctx->client, engines);
		i915_gem_context_put(ctx);

		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
1085 | break; |
1086 | } |
1087 | |
1088 | return NOTIFY_DONE; |
1089 | } |
1090 | |
1091 | static struct i915_gem_engines *alloc_engines(unsigned int count) |
1092 | { |
1093 | struct i915_gem_engines *e; |
1094 | |
1095 | e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); |
1096 | if (!e) |
1097 | return NULL; |
1098 | |
1099 | i915_sw_fence_init(&e->fence, engines_notify); |
1100 | return e; |
1101 | } |
1102 | |
1103 | static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx, |
1104 | struct intel_sseu rcs_sseu) |
1105 | { |
1106 | const unsigned int max = I915_NUM_ENGINES; |
1107 | struct intel_engine_cs *engine; |
1108 | struct i915_gem_engines *e, *err; |
1109 | |
	e = alloc_engines(max);
	if (!e)
		return ERR_PTR(-ENOMEM);
1113 | |
1114 | for_each_uabi_engine(engine, ctx->i915) { |
1115 | struct intel_context *ce; |
1116 | struct intel_sseu sseu = {}; |
1117 | int ret; |
1118 | |
1119 | if (engine->legacy_idx == INVALID_ENGINE) |
1120 | continue; |
1121 | |
1122 | GEM_BUG_ON(engine->legacy_idx >= max); |
1123 | GEM_BUG_ON(e->engines[engine->legacy_idx]); |
1124 | |
1125 | ce = intel_context_create(engine); |
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
1128 | goto free_engines; |
1129 | } |
1130 | |
1131 | e->engines[engine->legacy_idx] = ce; |
1132 | e->num_engines = max(e->num_engines, engine->legacy_idx + 1); |
1133 | |
1134 | if (engine->class == RENDER_CLASS) |
1135 | sseu = rcs_sseu; |
1136 | |
1137 | ret = intel_context_set_gem(ce, ctx, sseu); |
1138 | if (ret) { |
			err = ERR_PTR(ret);
1140 | goto free_engines; |
1141 | } |
1142 | |
1143 | } |
1144 | |
1145 | return e; |
1146 | |
1147 | free_engines: |
	free_engines(e);
1149 | return err; |
1150 | } |
1151 | |
1152 | static int perma_pin_contexts(struct intel_context *ce) |
1153 | { |
1154 | struct intel_context *child; |
1155 | int i = 0, j = 0, ret; |
1156 | |
1157 | GEM_BUG_ON(!intel_context_is_parent(ce)); |
1158 | |
1159 | ret = intel_context_pin(ce); |
1160 | if (unlikely(ret)) |
1161 | return ret; |
1162 | |
1163 | for_each_child(ce, child) { |
		ret = intel_context_pin(child);
1165 | if (unlikely(ret)) |
1166 | goto unwind; |
1167 | ++i; |
1168 | } |
1169 | |
	set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1171 | |
1172 | return 0; |
1173 | |
1174 | unwind: |
1175 | intel_context_unpin(ce); |
1176 | for_each_child(ce, child) { |
1177 | if (j++ < i) |
			intel_context_unpin(child);
1179 | else |
1180 | break; |
1181 | } |
1182 | |
1183 | return ret; |
1184 | } |
1185 | |
1186 | static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, |
1187 | unsigned int num_engines, |
1188 | struct i915_gem_proto_engine *pe) |
1189 | { |
1190 | struct i915_gem_engines *e, *err; |
1191 | unsigned int n; |
1192 | |
	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);
1196 | e->num_engines = num_engines; |
1197 | |
1198 | for (n = 0; n < num_engines; n++) { |
1199 | struct intel_context *ce, *child; |
1200 | int ret; |
1201 | |
1202 | switch (pe[n].type) { |
1203 | case I915_GEM_ENGINE_TYPE_PHYSICAL: |
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
1216 | break; |
1217 | |
1218 | case I915_GEM_ENGINE_TYPE_INVALID: |
1219 | default: |
1220 | GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID); |
1221 | continue; |
1222 | } |
1223 | |
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
1226 | goto free_engines; |
1227 | } |
1228 | |
1229 | e->engines[n] = ce; |
1230 | |
		ret = intel_context_set_gem(ce, ctx, pe->sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe->sseu);
			if (ret) {
				err = ERR_PTR(ret);
1240 | goto free_engines; |
1241 | } |
1242 | } |
1243 | |
1244 | /* |
1245 | * XXX: Must be done after calling intel_context_set_gem as that |
1246 | * function changes the ring size. The ring is allocated when |
1247 | * the context is pinned. If the ring size is changed after |
1248 | * allocation we have a mismatch of the ring size and will cause |
1249 | * the context to hang. Presumably with a bit of reordering we |
1250 | * could move the perma-pin step to the backend function |
1251 | * intel_engine_create_parallel. |
1252 | */ |
1253 | if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { |
1254 | ret = perma_pin_contexts(ce); |
1255 | if (ret) { |
				err = ERR_PTR(ret);
1257 | goto free_engines; |
1258 | } |
1259 | } |
1260 | } |
1261 | |
1262 | return e; |
1263 | |
1264 | free_engines: |
	free_engines(e);
1266 | return err; |
1267 | } |
1268 | |
1269 | static void i915_gem_context_release_work(struct work_struct *work) |
1270 | { |
1271 | struct i915_gem_context *ctx = container_of(work, typeof(*ctx), |
1272 | release_work); |
1273 | struct i915_address_space *vm; |
1274 | |
1275 | trace_i915_context_free(ctx); |
1276 | GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); |
1277 | |
	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);
1284 | |
1285 | vm = ctx->vm; |
1286 | if (vm) |
1287 | i915_vm_put(vm); |
1288 | |
1289 | if (ctx->pxp_wakeref) |
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	if (ctx->client)
		i915_drm_client_put(ctx->client);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);
1300 | |
1301 | kfree_rcu(ctx, rcu); |
1302 | } |
1303 | |
1304 | void i915_gem_context_release(struct kref *ref) |
1305 | { |
1306 | struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); |
1307 | |
	queue_work(ctx->i915->wq, &ctx->release_work);
1309 | } |
1310 | |
1311 | static inline struct i915_gem_engines * |
1312 | __context_engines_static(const struct i915_gem_context *ctx) |
1313 | { |
1314 | return rcu_dereference_protected(ctx->engines, true); |
1315 | } |
1316 | |
1317 | static void __reset_context(struct i915_gem_context *ctx, |
1318 | struct intel_engine_cs *engine) |
1319 | { |
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
1322 | } |
1323 | |
1324 | static bool __cancel_engine(struct intel_engine_cs *engine) |
1325 | { |
1326 | /* |
1327 | * Send a "high priority pulse" down the engine to cause the |
1328 | * current request to be momentarily preempted. (If it fails to |
1329 | * be preempted, it will be reset). As we have marked our context |
1330 | * as banned, any incomplete request, including any running, will |
1331 | * be skipped following the preemption. |
1332 | * |
1333 | * If there is no hangchecking (one of the reasons why we try to |
1334 | * cancel the context) and no forced preemption, there may be no |
1335 | * means by which we reset the GPU and evict the persistent hog. |
1336 | * Ergo if we are unable to inject a preemptive pulse that can |
	 * kill the banned context, we fall back to doing a local reset
1338 | * instead. |
1339 | */ |
1340 | return intel_engine_pulse(engine) == 0; |
1341 | } |
1342 | |
1343 | static struct intel_engine_cs *active_engine(struct intel_context *ce) |
1344 | { |
1345 | struct intel_engine_cs *engine = NULL; |
1346 | struct i915_request *rq; |
1347 | |
1348 | if (intel_context_has_inflight(ce)) |
1349 | return intel_context_inflight(ce); |
1350 | |
1351 | if (!ce->timeline) |
1352 | return NULL; |
1353 | |
1354 | /* |
1355 | * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference |
1356 | * to the request to prevent it being transferred to a new timeline |
1357 | * (and onto a new timeline->requests list). |
1358 | */ |
1359 | rcu_read_lock(); |
1360 | list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { |
1361 | bool found; |
1362 | |
		/* timeline is already completed up to this point? */
1364 | if (!i915_request_get_rcu(rq)) |
1365 | break; |
1366 | |
1367 | /* Check with the backend if the request is inflight */ |
1368 | found = true; |
1369 | if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) |
			found = i915_request_active_engine(rq, &engine);
1371 | |
1372 | i915_request_put(rq); |
1373 | if (found) |
1374 | break; |
1375 | } |
1376 | rcu_read_unlock(); |
1377 | |
1378 | return engine; |
1379 | } |
1380 | |
1381 | static void |
1382 | kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent) |
1383 | { |
1384 | struct i915_gem_engines_iter it; |
1385 | struct intel_context *ce; |
1386 | |
1387 | /* |
1388 | * Map the user's engine back to the actual engines; one virtual |
1389 | * engine will be mapped to multiple engines, and using ctx->engine[] |
	 * the same engine may have multiple instances in the user's map.
1391 | * However, we only care about pending requests, so only include |
1392 | * engines on which there are incomplete requests. |
1393 | */ |
1394 | for_each_gem_engine(ce, engines, it) { |
1395 | struct intel_engine_cs *engine; |
1396 | |
1397 | if ((exit || !persistent) && intel_context_revoke(ce)) |
1398 | continue; /* Already marked. */ |
1399 | |
1400 | /* |
1401 | * Check the current active state of this context; if we |
1402 | * are currently executing on the GPU we need to evict |
1403 | * ourselves. On the other hand, if we haven't yet been |
1404 | * submitted to the GPU or if everything is complete, |
1405 | * we have nothing to do. |
1406 | */ |
1407 | engine = active_engine(ce); |
1408 | |
1409 | /* First attempt to gracefully cancel the context */ |
1410 | if (engine && !__cancel_engine(engine) && (exit || !persistent)) |
1411 | /* |
1412 | * If we are unable to send a preemptive pulse to bump |
1413 | * the context from the GPU, we have to resort to a full |
1414 | * reset. We hope the collateral damage is worth it. |
1415 | */ |
			__reset_context(engines->ctx, engine);
1417 | } |
1418 | } |
1419 | |
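/*
 * Walk the stale engine lists of a now-closed context and have kill_engines()
 * revoke (and, where necessary, preempt or reset) anything still running on
 * behalf of this context. The sw_fence await/complete pair keeps each engines
 * list alive across the unlocked kill_engines() call.
 */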
1420 | static void kill_context(struct i915_gem_context *ctx) |
1421 | { |
1422 | struct i915_gem_engines *pos, *next; |
1423 | |
	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
1429 | continue; |
1430 | } |
1431 | |
		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, !ctx->i915->params.enable_hangcheck,
			     i915_gem_context_is_persistent(ctx));
1436 | |
		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
1445 | } |
1446 | |
1447 | static void engines_idle_release(struct i915_gem_context *ctx, |
1448 | struct i915_gem_engines *engines) |
1449 | { |
1450 | struct i915_gem_engines_iter it; |
1451 | struct intel_context *ce; |
1452 | |
	INIT_LIST_HEAD(&engines->link);
1454 | |
1455 | engines->ctx = i915_gem_context_get(ctx); |
1456 | |
1457 | for_each_gem_engine(ce, engines, it) { |
1458 | int err; |
1459 | |
1460 | /* serialises with execbuf */ |
1461 | intel_context_close(ce); |
1462 | if (!intel_context_pin_if_active(ce)) |
1463 | continue; |
1464 | |
1465 | /* Wait until context is finally scheduled out and retired */ |
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
1469 | intel_context_unpin(ce); |
1470 | if (err) |
1471 | goto kill; |
1472 | } |
1473 | |
	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true,
			     i915_gem_context_is_persistent(ctx));

	i915_sw_fence_commit(&engines->fence);
1485 | } |
1486 | |
1487 | static void set_closed_name(struct i915_gem_context *ctx) |
1488 | { |
1489 | char *s; |
1490 | |
1491 | /* Replace '[]' with '<>' to indicate closed in debug prints */ |
1492 | |
1493 | s = strrchr(ctx->name, '['); |
1494 | if (!s) |
1495 | return; |
1496 | |
1497 | *s = '<'; |
1498 | |
1499 | s = strchr(s + 1, ']'); |
1500 | if (s) |
1501 | *s = '>'; |
1502 | } |
1503 | |
1504 | static void context_close(struct i915_gem_context *ctx) |
1505 | { |
1506 | struct i915_drm_client *client; |
1507 | |
1508 | /* Flush any concurrent set_engines() */ |
1509 | mutex_lock(&ctx->engines_mutex); |
	unpin_engines(__context_engines_static(ctx));
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);
1514 | |
1515 | mutex_lock(&ctx->mutex); |
1516 | |
1517 | set_closed_name(ctx); |
1518 | |
1519 | /* |
1520 | * The LUT uses the VMA as a backpointer to unref the object, |
1521 | * so we need to clear the LUT before we close all the VMA (inside |
1522 | * the ppgtt). |
1523 | */ |
1524 | lut_close(ctx); |
1525 | |
	ctx->file_priv = ERR_PTR(-EBADF);

	client = ctx->client;
	if (client) {
		spin_lock(&client->ctx_lock);
		list_del_rcu(&ctx->client_link);
		spin_unlock(&client->ctx_lock);
	}

	mutex_unlock(&ctx->mutex);
1536 | |
1537 | /* |
	 * If the user has disabled hangchecking, we cannot be sure that
1539 | * the batches will ever complete after the context is closed, |
1540 | * keeping the context and all resources pinned forever. So in this |
1541 | * case we opt to forcibly kill off all remaining requests on |
1542 | * context close. |
1543 | */ |
1544 | kill_context(ctx); |
1545 | |
1546 | i915_gem_context_put(ctx); |
1547 | } |
1548 | |
1549 | static int __context_set_persistence(struct i915_gem_context *ctx, bool state) |
1550 | { |
1551 | if (i915_gem_context_is_persistent(ctx) == state) |
1552 | return 0; |
1553 | |
1554 | if (state) { |
1555 | /* |
1556 | * Only contexts that are short-lived [that will expire or be |
1557 | * reset] are allowed to survive past termination. We require |
1558 | * hangcheck to ensure that the persistent requests are healthy. |
1559 | */ |
1560 | if (!ctx->i915->params.enable_hangcheck) |
1561 | return -EINVAL; |
1562 | |
1563 | i915_gem_context_set_persistence(ctx); |
1564 | } else { |
1565 | /* To cancel a context we use "preempt-to-idle" */ |
1566 | if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) |
1567 | return -ENODEV; |
1568 | |
1569 | /* |
1570 | * If the cancel fails, we then need to reset, cleanly! |
1571 | * |
1572 | * If the per-engine reset fails, all hope is lost! We resort |
1573 | * to a full GPU reset in that unlikely case, but realistically |
1574 | * if the engine could not reset, the full reset does not fare |
1575 | * much better. The damage has been done. |
1576 | * |
1577 | * However, if we cannot reset an engine by itself, we cannot |
1578 | * cleanup a hanging persistent context without causing |
		 * collateral damage, and we should not pretend we can by
1580 | * exposing the interface. |
1581 | */ |
		if (!intel_has_reset_engine(to_gt(ctx->i915)))
1583 | return -ENODEV; |
1584 | |
1585 | i915_gem_context_clear_persistence(ctx); |
1586 | } |
1587 | |
1588 | return 0; |
1589 | } |
1590 | |
1591 | static struct i915_gem_context * |
1592 | i915_gem_create_context(struct drm_i915_private *i915, |
1593 | const struct i915_gem_proto_context *pc) |
1594 | { |
1595 | struct i915_gem_context *ctx; |
1596 | struct i915_address_space *vm = NULL; |
1597 | struct i915_gem_engines *e; |
1598 | int err; |
1599 | int i; |
1600 | |
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched = pc->sched;
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->link);
	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	if (pc->vm) {
		vm = i915_vm_get(pc->vm);
1617 | } else if (HAS_FULL_PPGTT(i915)) { |
1618 | struct i915_ppgtt *ppgtt; |
1619 | |
		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			err = PTR_ERR(ppgtt);
1625 | goto err_ctx; |
1626 | } |
1627 | ppgtt->vm.fpriv = pc->fpriv; |
1628 | vm = &ppgtt->vm; |
1629 | } |
1630 | if (vm) |
1631 | ctx->vm = vm; |
1632 | |
1633 | mutex_init(&ctx->engines_mutex); |
1634 | if (pc->num_user_engines >= 0) { |
1635 | i915_gem_context_set_user_engines(ctx); |
		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
	} else {
		i915_gem_context_clear_user_engines(ctx);
		e = default_engines(ctx, pc->legacy_rcs_sseu);
	}
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
1643 | goto err_vm; |
1644 | } |
1645 | RCU_INIT_POINTER(ctx->engines, e); |
1646 | |
1647 | INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); |
1648 | mutex_init(&ctx->lut_mutex); |
1649 | |
1650 | /* NB: Mark all slices as needing a remap so that when the context first |
1651 | * loads it will restore whatever remap state already exists. If there |
1652 | * is no remap info, it will be a NOP. */ |
1653 | ctx->remap_slice = ALL_L3_SLICES(i915); |
1654 | |
1655 | ctx->user_flags = pc->user_flags; |
1656 | |
1657 | for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) |
1658 | ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; |
1659 | |
1660 | if (pc->single_timeline) { |
		err = drm_syncobj_create(&ctx->syncobj,
1662 | DRM_SYNCOBJ_CREATE_SIGNALED, |
1663 | NULL); |
1664 | if (err) |
1665 | goto err_engines; |
1666 | } |
1667 | |
1668 | if (pc->uses_protected_content) { |
		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1670 | ctx->uses_protected_content = true; |
1671 | } |
1672 | |
1673 | trace_i915_context_create(ctx); |
1674 | |
1675 | return ctx; |
1676 | |
1677 | err_engines: |
	free_engines(e);
err_vm:
	if (ctx->vm)
		i915_vm_put(ctx->vm);
err_ctx:
	kfree(ctx);
	return ERR_PTR(err);
1685 | } |
1686 | |
1687 | static void init_contexts(struct i915_gem_contexts *gc) |
1688 | { |
1689 | spin_lock_init(&gc->lock); |
	INIT_LIST_HEAD(&gc->list);
}

void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
1696 | } |
1697 | |
1698 | /* |
1699 | * Note that this implicitly consumes the ctx reference, by placing |
1700 | * the ctx in the context_xa. |
1701 | */ |
1702 | static void gem_context_register(struct i915_gem_context *ctx, |
1703 | struct drm_i915_file_private *fpriv, |
1704 | u32 id) |
1705 | { |
1706 | struct drm_i915_private *i915 = ctx->i915; |
1707 | void *old; |
1708 | |
1709 | ctx->file_priv = fpriv; |
1710 | |
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->client = i915_drm_client_get(fpriv->client);

	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	spin_lock(&ctx->client->ctx_lock);
	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
	spin_unlock(&ctx->client->ctx_lock);

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	/* And finally expose ourselves to userspace via the idr */
	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1727 | WARN_ON(old); |
1728 | } |
1729 | |
1730 | int i915_gem_context_open(struct drm_i915_private *i915, |
1731 | struct drm_file *file) |
1732 | { |
1733 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1734 | struct i915_gem_proto_context *pc; |
1735 | struct i915_gem_context *ctx; |
1736 | int err; |
1737 | |
1738 | mutex_init(&file_priv->proto_context_lock); |
1739 | xa_init_flags(xa: &file_priv->proto_context_xa, XA_FLAGS_ALLOC); |
1740 | |
1741 | /* 0 reserved for the default context */ |
1742 | xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); |
1743 | |
1744 | /* 0 reserved for invalid/unassigned ppgtt */ |
1745 | xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); |
1746 | |
1747 | pc = proto_context_create(file_priv, i915, 0); |
1748 | if (IS_ERR(pc)) { |
1749 | err = PTR_ERR(pc); |
1750 | goto err; |
1751 | } |
1752 | |
1753 | ctx = i915_gem_create_context(i915, pc); |
1754 | proto_context_close(i915, pc); |
1755 | if (IS_ERR(ctx)) { |
1756 | err = PTR_ERR(ctx); |
1757 | goto err; |
1758 | } |
1759 | |
1760 | gem_context_register(ctx, file_priv, 0); |
1761 | |
1762 | return 0; |
1763 | |
1764 | err: |
1765 | xa_destroy(&file_priv->vm_xa); |
1766 | xa_destroy(&file_priv->context_xa); |
1767 | xa_destroy(&file_priv->proto_context_xa); |
1768 | mutex_destroy(&file_priv->proto_context_lock); |
1769 | return err; |
1770 | } |
1771 | |
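/*
 * Called at drm_file release: tear down any proto-contexts that were never
 * finalized, close all remaining contexts, and drop the VM handles still
 * held in vm_xa.
 */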
1772 | void i915_gem_context_close(struct drm_file *file) |
1773 | { |
1774 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1775 | struct i915_gem_proto_context *pc; |
1776 | struct i915_address_space *vm; |
1777 | struct i915_gem_context *ctx; |
1778 | unsigned long idx; |
1779 | |
1780 | xa_for_each(&file_priv->proto_context_xa, idx, pc) |
1781 | proto_context_close(file_priv->i915, pc); |
1782 | xa_destroy(&file_priv->proto_context_xa); |
1783 | mutex_destroy(&file_priv->proto_context_lock); |
1784 | |
1785 | xa_for_each(&file_priv->context_xa, idx, ctx) |
1786 | context_close(ctx); |
1787 | xa_destroy(&file_priv->context_xa); |
1788 | |
1789 | xa_for_each(&file_priv->vm_xa, idx, vm) |
1790 | i915_vm_put(vm); |
1791 | xa_destroy(&file_priv->vm_xa); |
1792 | } |
1793 | |
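/*
 * DRM_I915_GEM_VM_CREATE: allocate a new full ppGTT and return a handle for
 * it in args->vm_id. Handle 0 is never allocated as it is reserved for the
 * invalid/unassigned ppGTT. A rough, illustrative userspace call (not taken
 * from this file) would be:
 *
 *	struct drm_i915_gem_vm_control ctl = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *	// ctl.vm_id now names the new VM for later context setparam use
 */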
1794 | int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, |
1795 | struct drm_file *file) |
1796 | { |
1797 | struct drm_i915_private *i915 = to_i915(dev); |
1798 | struct drm_i915_gem_vm_control *args = data; |
1799 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1800 | struct i915_ppgtt *ppgtt; |
1801 | u32 id; |
1802 | int err; |
1803 | |
1804 | if (!HAS_FULL_PPGTT(i915)) |
1805 | return -ENODEV; |
1806 | |
1807 | if (args->flags) |
1808 | return -EINVAL; |
1809 | |
1810 | ppgtt = i915_ppgtt_create(to_gt(i915), 0); |
1811 | if (IS_ERR(ppgtt)) |
1812 | return PTR_ERR(ppgtt); |
1813 | |
1814 | if (args->extensions) { |
1815 | err = i915_user_extensions(u64_to_user_ptr(args->extensions), |
1816 | NULL, 0, |
1817 | ppgtt); |
1818 | if (err) |
1819 | goto err_put; |
1820 | } |
1821 | |
1822 | err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, |
1823 | xa_limit_32b, GFP_KERNEL); |
1824 | if (err) |
1825 | goto err_put; |
1826 | |
1827 | GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ |
1828 | args->vm_id = id; |
1829 | ppgtt->vm.fpriv = file_priv; |
1830 | return 0; |
1831 | |
1832 | err_put: |
1833 | i915_vm_put(&ppgtt->vm); |
1834 | return err; |
1835 | } |
1836 | |
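/*
 * DRM_I915_GEM_VM_DESTROY: drop the file's handle on a VM previously created
 * with VM_CREATE. The VM itself is only freed once the last reference (for
 * example from a context still using it) is dropped.
 */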
1837 | int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, |
1838 | struct drm_file *file) |
1839 | { |
1840 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1841 | struct drm_i915_gem_vm_control *args = data; |
1842 | struct i915_address_space *vm; |
1843 | |
1844 | if (args->flags) |
1845 | return -EINVAL; |
1846 | |
1847 | if (args->extensions) |
1848 | return -EINVAL; |
1849 | |
1850 | vm = xa_erase(&file_priv->vm_xa, args->vm_id); |
1851 | if (!vm) |
1852 | return -ENOENT; |
1853 | |
1854 | i915_vm_put(vm); |
1855 | return 0; |
1856 | } |
1857 | |
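/*
 * GETPARAM(I915_CONTEXT_PARAM_VM): export the context's ppGTT as a new
 * handle in the file's vm_xa so userspace can share it with other contexts.
 */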
1858 | static int get_ppgtt(struct drm_i915_file_private *file_priv, |
1859 | struct i915_gem_context *ctx, |
1860 | struct drm_i915_gem_context_param *args) |
1861 | { |
1862 | struct i915_address_space *vm; |
1863 | int err; |
1864 | u32 id; |
1865 | |
1866 | if (!i915_gem_context_has_full_ppgtt(ctx)) |
1867 | return -ENODEV; |
1868 | |
1869 | vm = ctx->vm; |
1870 | GEM_BUG_ON(!vm); |
1871 | |
1872 | /* |
1873 | * Get a reference for the allocated handle. Once the handle is |
1874 | * visible in the vm_xa table, userspace could try to close it |
1875 | * from under our feet, so we need to hold the extra reference |
1876 | * first. |
1877 | */ |
1878 | i915_vm_get(vm); |
1879 | |
1880 | err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); |
1881 | if (err) { |
1882 | i915_vm_put(vm); |
1883 | return err; |
1884 | } |
1885 | |
1886 | GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ |
1887 | args->value = id; |
1888 | args->size = 0; |
1889 | |
1890 | return err; |
1891 | } |
1892 | |
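/*
 * Validate a user-supplied SSEU configuration against the device limits and
 * the additional gen11 ABI restrictions, and translate it into the internal
 * struct intel_sseu representation.
 */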
1893 | int |
1894 | i915_gem_user_to_context_sseu(struct intel_gt *gt, |
1895 | const struct drm_i915_gem_context_param_sseu *user, |
1896 | struct intel_sseu *context) |
1897 | { |
1898 | const struct sseu_dev_info *device = &gt->info.sseu; |
1899 | struct drm_i915_private *i915 = gt->i915; |
1900 | unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0); |
1901 | |
1902 | /* No zeros in any field. */ |
1903 | if (!user->slice_mask || !user->subslice_mask || |
1904 | !user->min_eus_per_subslice || !user->max_eus_per_subslice) |
1905 | return -EINVAL; |
1906 | |
1907 | /* Max > min. */ |
1908 | if (user->max_eus_per_subslice < user->min_eus_per_subslice) |
1909 | return -EINVAL; |
1910 | |
1911 | /* |
1912 | * Some future proofing on the types since the uAPI is wider than the |
1913 | * current internal implementation. |
1914 | */ |
1915 | if (overflows_type(user->slice_mask, context->slice_mask) || |
1916 | overflows_type(user->subslice_mask, context->subslice_mask) || |
1917 | overflows_type(user->min_eus_per_subslice, |
1918 | context->min_eus_per_subslice) || |
1919 | overflows_type(user->max_eus_per_subslice, |
1920 | context->max_eus_per_subslice)) |
1921 | return -EINVAL; |
1922 | |
1923 | /* Check validity against hardware. */ |
1924 | if (user->slice_mask & ~device->slice_mask) |
1925 | return -EINVAL; |
1926 | |
1927 | if (user->subslice_mask & ~dev_subslice_mask) |
1928 | return -EINVAL; |
1929 | |
1930 | if (user->max_eus_per_subslice > device->max_eus_per_subslice) |
1931 | return -EINVAL; |
1932 | |
1933 | context->slice_mask = user->slice_mask; |
1934 | context->subslice_mask = user->subslice_mask; |
1935 | context->min_eus_per_subslice = user->min_eus_per_subslice; |
1936 | context->max_eus_per_subslice = user->max_eus_per_subslice; |
1937 | |
1938 | /* Part specific restrictions. */ |
1939 | if (GRAPHICS_VER(i915) == 11) { |
1940 | unsigned int hw_s = hweight8(device->slice_mask); |
1941 | unsigned int hw_ss_per_s = hweight8(dev_subslice_mask); |
1942 | unsigned int req_s = hweight8(context->slice_mask); |
1943 | unsigned int req_ss = hweight8(context->subslice_mask); |
1944 | |
1945 | /* |
1946 | * Only full subslice enablement is possible if more than one |
1947 | * slice is turned on. |
1948 | */ |
1949 | if (req_s > 1 && req_ss != hw_ss_per_s) |
1950 | return -EINVAL; |
1951 | |
1952 | /* |
1953 | * If more than four (SScount bitfield limit) subslices are |
1954 | * requested then the number has to be even. |
1955 | */ |
1956 | if (req_ss > 4 && (req_ss & 1)) |
1957 | return -EINVAL; |
1958 | |
1959 | /* |
1960 | * If only one slice is enabled and subslice count is below the |
1961 | * device full enablement, it must be at most half of the all |
1962 | * available subslices. |
1963 | */ |
1964 | if (req_s == 1 && req_ss < hw_ss_per_s && |
1965 | req_ss > (hw_ss_per_s / 2)) |
1966 | return -EINVAL; |
1967 | |
1968 | /* ABI restriction - VME use case only. */ |
1969 | |
1970 | /* All slices or one slice only. */ |
1971 | if (req_s != 1 && req_s != hw_s) |
1972 | return -EINVAL; |
1973 | |
1974 | /* |
1975 | * Half subslices or full enablement only when one slice is |
1976 | * enabled. |
1977 | */ |
1978 | if (req_s == 1 && |
1979 | (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) |
1980 | return -EINVAL; |
1981 | |
1982 | /* No EU configuration changes. */ |
1983 | if ((user->min_eus_per_subslice != |
1984 | device->max_eus_per_subslice) || |
1985 | (user->max_eus_per_subslice != |
1986 | device->max_eus_per_subslice)) |
1987 | return -EINVAL; |
1988 | } |
1989 | |
1990 | return 0; |
1991 | } |
1992 | |
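/*
 * SETPARAM(I915_CONTEXT_PARAM_SSEU): look up the targeted (render) engine,
 * validate the requested SSEU configuration and ask the backend to
 * reconfigure the context. Only graphics version 11 is supported here.
 */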
1993 | static int set_sseu(struct i915_gem_context *ctx, |
1994 | struct drm_i915_gem_context_param *args) |
1995 | { |
1996 | struct drm_i915_private *i915 = ctx->i915; |
1997 | struct drm_i915_gem_context_param_sseu user_sseu; |
1998 | struct intel_context *ce; |
1999 | struct intel_sseu sseu; |
2000 | unsigned long lookup; |
2001 | int ret; |
2002 | |
2003 | if (args->size < sizeof(user_sseu)) |
2004 | return -EINVAL; |
2005 | |
2006 | if (GRAPHICS_VER(i915) != 11) |
2007 | return -ENODEV; |
2008 | |
2009 | if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), |
2010 | sizeof(user_sseu))) |
2011 | return -EFAULT; |
2012 | |
2013 | if (user_sseu.rsvd) |
2014 | return -EINVAL; |
2015 | |
2016 | if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) |
2017 | return -EINVAL; |
2018 | |
2019 | lookup = 0; |
2020 | if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) |
2021 | lookup |= LOOKUP_USER_INDEX; |
2022 | |
2023 | ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); |
2024 | if (IS_ERR(ce)) |
2025 | return PTR_ERR(ce); |
2026 | |
2027 | /* Only render engine supports RPCS configuration. */ |
2028 | if (ce->engine->class != RENDER_CLASS) { |
2029 | ret = -ENODEV; |
2030 | goto out_ce; |
2031 | } |
2032 | |
2033 | ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); |
2034 | if (ret) |
2035 | goto out_ce; |
2036 | |
2037 | ret = intel_context_reconfigure_sseu(ce, sseu); |
2038 | if (ret) |
2039 | goto out_ce; |
2040 | |
2041 | args->size = sizeof(user_sseu); |
2042 | |
2043 | out_ce: |
2044 | intel_context_put(ce); |
2045 | return ret; |
2046 | } |
2047 | |
2048 | static int |
2049 | set_persistence(struct i915_gem_context *ctx, |
2050 | const struct drm_i915_gem_context_param *args) |
2051 | { |
2052 | if (args->size) |
2053 | return -EINVAL; |
2054 | |
2055 | return __context_set_persistence(ctx, args->value); |
2056 | } |
2057 | |
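/*
 * SETPARAM(I915_CONTEXT_PARAM_PRIORITY): update the scheduling priority and,
 * on engines that support timeslicing, enable inter-engine semaphores only
 * for contexts at or above normal priority.
 */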
2058 | static int set_priority(struct i915_gem_context *ctx, |
2059 | const struct drm_i915_gem_context_param *args) |
2060 | { |
2061 | struct i915_gem_engines_iter it; |
2062 | struct intel_context *ce; |
2063 | int err; |
2064 | |
2065 | err = validate_priority(ctx->i915, args); |
2066 | if (err) |
2067 | return err; |
2068 | |
2069 | ctx->sched.priority = args->value; |
2070 | |
2071 | for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { |
2072 | if (!intel_engine_has_timeslices(ce->engine)) |
2073 | continue; |
2074 | |
2075 | if (ctx->sched.priority >= I915_PRIORITY_NORMAL && |
2076 | intel_engine_has_semaphores(ce->engine)) |
2077 | intel_context_set_use_semaphores(ce); |
2078 | else |
2079 | intel_context_clear_use_semaphores(ce); |
2080 | } |
2081 | i915_gem_context_unlock_engines(ctx); |
2082 | |
2083 | return 0; |
2084 | } |
2085 | |
2086 | static int get_protected(struct i915_gem_context *ctx, |
2087 | struct drm_i915_gem_context_param *args) |
2088 | { |
2089 | args->size = 0; |
2090 | args->value = i915_gem_context_uses_protected_content(ctx); |
2091 | |
2092 | return 0; |
2093 | } |
2094 | |
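/*
 * Apply a SETPARAM request to an already finalized context. Parameters that
 * can only be chosen at creation time (VM, engines, protected content, ...)
 * are rejected here and must go through the proto-context path instead.
 */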
2095 | static int ctx_setparam(struct drm_i915_file_private *fpriv, |
2096 | struct i915_gem_context *ctx, |
2097 | struct drm_i915_gem_context_param *args) |
2098 | { |
2099 | int ret = 0; |
2100 | |
2101 | switch (args->param) { |
2102 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: |
2103 | if (args->size) |
2104 | ret = -EINVAL; |
2105 | else if (args->value) |
2106 | i915_gem_context_set_no_error_capture(ctx); |
2107 | else |
2108 | i915_gem_context_clear_no_error_capture(ctx); |
2109 | break; |
2110 | |
2111 | case I915_CONTEXT_PARAM_BANNABLE: |
2112 | if (args->size) |
2113 | ret = -EINVAL; |
2114 | else if (!capable(CAP_SYS_ADMIN) && !args->value) |
2115 | ret = -EPERM; |
2116 | else if (args->value) |
2117 | i915_gem_context_set_bannable(ctx); |
2118 | else if (i915_gem_context_uses_protected_content(ctx)) |
2119 | ret = -EPERM; /* can't clear this for protected contexts */ |
2120 | else |
2121 | i915_gem_context_clear_bannable(ctx); |
2122 | break; |
2123 | |
2124 | case I915_CONTEXT_PARAM_RECOVERABLE: |
2125 | if (args->size) |
2126 | ret = -EINVAL; |
2127 | else if (!args->value) |
2128 | i915_gem_context_clear_recoverable(ctx); |
2129 | else if (i915_gem_context_uses_protected_content(ctx)) |
2130 | ret = -EPERM; /* can't set this for protected contexts */ |
2131 | else |
2132 | i915_gem_context_set_recoverable(ctx); |
2133 | break; |
2134 | |
2135 | case I915_CONTEXT_PARAM_PRIORITY: |
2136 | ret = set_priority(ctx, args); |
2137 | break; |
2138 | |
2139 | case I915_CONTEXT_PARAM_SSEU: |
2140 | ret = set_sseu(ctx, args); |
2141 | break; |
2142 | |
2143 | case I915_CONTEXT_PARAM_PERSISTENCE: |
2144 | ret = set_persistence(ctx, args); |
2145 | break; |
2146 | |
2147 | case I915_CONTEXT_PARAM_PROTECTED_CONTENT: |
2148 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
2149 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
2150 | case I915_CONTEXT_PARAM_RINGSIZE: |
2151 | case I915_CONTEXT_PARAM_VM: |
2152 | case I915_CONTEXT_PARAM_ENGINES: |
2153 | default: |
2154 | ret = -EINVAL; |
2155 | break; |
2156 | } |
2157 | |
2158 | return ret; |
2159 | } |
2160 | |
2161 | struct create_ext { |
2162 | struct i915_gem_proto_context *pc; |
2163 | struct drm_i915_file_private *fpriv; |
2164 | }; |
2165 | |
2166 | static int create_setparam(struct i915_user_extension __user *ext, void *data) |
2167 | { |
2168 | struct drm_i915_gem_context_create_ext_setparam local; |
2169 | const struct create_ext *arg = data; |
2170 | |
2171 | if (copy_from_user(&local, ext, sizeof(local))) |
2172 | return -EFAULT; |
2173 | |
2174 | if (local.param.ctx_id) |
2175 | return -EINVAL; |
2176 | |
2177 | return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param); |
2178 | } |
2179 | |
2180 | static int invalid_ext(struct i915_user_extension __user *ext, void *data) |
2181 | { |
2182 | return -EINVAL; |
2183 | } |
2184 | |
2185 | static const i915_user_extension_fn create_extensions[] = { |
2186 | [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, |
2187 | [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext, |
2188 | }; |
2189 | |
2190 | static bool client_is_banned(struct drm_i915_file_private *file_priv) |
2191 | { |
2192 | return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; |
2193 | } |
2194 | |
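/*
 * Look up a finalized context by id under RCU, returning it with an extra
 * reference, or NULL if it is not present (or already on its way out).
 */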
2195 | static inline struct i915_gem_context * |
2196 | __context_lookup(struct drm_i915_file_private *file_priv, u32 id) |
2197 | { |
2198 | struct i915_gem_context *ctx; |
2199 | |
2200 | rcu_read_lock(); |
2201 | ctx = xa_load(&file_priv->context_xa, id); |
2202 | if (ctx && !kref_get_unless_zero(&ctx->ref)) |
2203 | ctx = NULL; |
2204 | rcu_read_unlock(); |
2205 | |
2206 | return ctx; |
2207 | } |
2208 | |
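/*
 * Turn a lazily created proto-context into a real context and register it
 * under the same id. Caller must hold proto_context_lock.
 */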
2209 | static struct i915_gem_context * |
2210 | finalize_create_context_locked(struct drm_i915_file_private *file_priv, |
2211 | struct i915_gem_proto_context *pc, u32 id) |
2212 | { |
2213 | struct i915_gem_context *ctx; |
2214 | void *old; |
2215 | |
2216 | lockdep_assert_held(&file_priv->proto_context_lock); |
2217 | |
2218 | ctx = i915_gem_create_context(file_priv->i915, pc); |
2219 | if (IS_ERR(ctx)) |
2220 | return ctx; |
2221 | |
2222 | /* |
2223 | * One for the xarray and one for the caller. We need to grab |
2224 | * the reference *prior* to making the ctx visible to userspace |
2225 | * in gem_context_register(), as at any point after that |
2226 | * userspace can try to race us with another thread destroying |
2227 | * the context under our feet. |
2228 | */ |
2229 | i915_gem_context_get(ctx); |
2230 | |
2231 | gem_context_register(ctx, fpriv: file_priv, id); |
2232 | |
2233 | old = xa_erase(&file_priv->proto_context_xa, id); |
2234 | GEM_BUG_ON(old != pc); |
2235 | proto_context_close(file_priv->i915, pc); |
2236 | |
2237 | return ctx; |
2238 | } |
2239 | |
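/*
 * Look up a context by id on behalf of the caller. If only a proto-context
 * exists for that id (older uAPI where contexts are finalized lazily on
 * first real use), finalize it now under the proto_context_lock.
 */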
2240 | struct i915_gem_context * |
2241 | i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) |
2242 | { |
2243 | struct i915_gem_proto_context *pc; |
2244 | struct i915_gem_context *ctx; |
2245 | |
2246 | ctx = __context_lookup(file_priv, id); |
2247 | if (ctx) |
2248 | return ctx; |
2249 | |
2250 | mutex_lock(&file_priv->proto_context_lock); |
2251 | /* Try one more time under the lock */ |
2252 | ctx = __context_lookup(file_priv, id); |
2253 | if (!ctx) { |
2254 | pc = xa_load(&file_priv->proto_context_xa, id); |
2255 | if (!pc) |
2256 | ctx = ERR_PTR(-ENOENT); |
2257 | else |
2258 | ctx = finalize_create_context_locked(file_priv, pc, id); |
2259 | } |
2260 | mutex_unlock(&file_priv->proto_context_lock); |
2261 | |
2262 | return ctx; |
2263 | } |
2264 | |
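/*
 * DRM_I915_GEM_CONTEXT_CREATE: create a new context. On graphics version 13
 * and later the context is finalized immediately; on older parts only a
 * proto-context is registered and finalization is deferred until first use.
 */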
2265 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
2266 | struct drm_file *file) |
2267 | { |
2268 | struct drm_i915_private *i915 = to_i915(dev); |
2269 | struct drm_i915_gem_context_create_ext *args = data; |
2270 | struct create_ext ext_data; |
2271 | int ret; |
2272 | u32 id; |
2273 | |
2274 | if (!DRIVER_CAPS(i915)->has_logical_contexts) |
2275 | return -ENODEV; |
2276 | |
2277 | if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) |
2278 | return -EINVAL; |
2279 | |
2280 | ret = intel_gt_terminally_wedged(to_gt(i915)); |
2281 | if (ret) |
2282 | return ret; |
2283 | |
2284 | ext_data.fpriv = file->driver_priv; |
2285 | if (client_is_banned(ext_data.fpriv)) { |
2286 | drm_dbg(&i915->drm, |
2287 | "client %s[%d] banned from creating ctx\n" , |
2288 | current->comm, task_pid_nr(current)); |
2289 | return -EIO; |
2290 | } |
2291 | |
2292 | ext_data.pc = proto_context_create(file->driver_priv, i915, |
2293 | args->flags); |
2294 | if (IS_ERR(ext_data.pc)) |
2295 | return PTR_ERR(ext_data.pc); |
2296 | |
2297 | if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { |
2298 | ret = i915_user_extensions(u64_to_user_ptr(args->extensions), |
2299 | create_extensions, |
2300 | ARRAY_SIZE(create_extensions), |
2301 | &ext_data); |
2302 | if (ret) |
2303 | goto err_pc; |
2304 | } |
2305 | |
2306 | if (GRAPHICS_VER(i915) > 12) { |
2307 | struct i915_gem_context *ctx; |
2308 | |
2309 | /* Get ourselves a context ID */ |
2310 | ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL, |
2311 | xa_limit_32b, GFP_KERNEL); |
2312 | if (ret) |
2313 | goto err_pc; |
2314 | |
2315 | ctx = i915_gem_create_context(i915, ext_data.pc); |
2316 | if (IS_ERR(ctx)) { |
2317 | ret = PTR_ERR(ctx); |
2318 | goto err_pc; |
2319 | } |
2320 | |
2321 | proto_context_close(i915, ext_data.pc); |
2322 | gem_context_register(ctx, ext_data.fpriv, id); |
2323 | } else { |
2324 | ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); |
2325 | if (ret < 0) |
2326 | goto err_pc; |
2327 | } |
2328 | |
2329 | args->ctx_id = id; |
2330 | |
2331 | return 0; |
2332 | |
2333 | err_pc: |
2334 | proto_context_close(i915, ext_data.pc); |
2335 | return ret; |
2336 | } |
2337 | |
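/*
 * DRM_I915_GEM_CONTEXT_DESTROY: remove the id from both the context and
 * proto-context tables (at most one of them can own it) and close whichever
 * one was found.
 */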
2338 | int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
2339 | struct drm_file *file) |
2340 | { |
2341 | struct drm_i915_gem_context_destroy *args = data; |
2342 | struct drm_i915_file_private *file_priv = file->driver_priv; |
2343 | struct i915_gem_proto_context *pc; |
2344 | struct i915_gem_context *ctx; |
2345 | |
2346 | if (args->pad != 0) |
2347 | return -EINVAL; |
2348 | |
2349 | if (!args->ctx_id) |
2350 | return -ENOENT; |
2351 | |
2352 | /* We need to hold the proto-context lock here to prevent races |
2353 | * with finalize_create_context_locked(). |
2354 | */ |
2355 | mutex_lock(&file_priv->proto_context_lock); |
2356 | ctx = xa_erase(&file_priv->context_xa, args->ctx_id); |
2357 | pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id); |
2358 | mutex_unlock(&file_priv->proto_context_lock); |
2359 | |
2360 | if (!ctx && !pc) |
2361 | return -ENOENT; |
2362 | GEM_WARN_ON(ctx && pc); |
2363 | |
2364 | if (pc) |
2365 | proto_context_close(file_priv->i915, pc); |
2366 | |
2367 | if (ctx) |
2368 | context_close(ctx); |
2369 | |
2370 | return 0; |
2371 | } |
2372 | |
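/*
 * GETPARAM(I915_CONTEXT_PARAM_SSEU): report the current SSEU configuration
 * of the targeted engine back to userspace. args->size == 0 is treated as a
 * query for the size of the parameter block.
 */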
2373 | static int get_sseu(struct i915_gem_context *ctx, |
2374 | struct drm_i915_gem_context_param *args) |
2375 | { |
2376 | struct drm_i915_gem_context_param_sseu user_sseu; |
2377 | struct intel_context *ce; |
2378 | unsigned long lookup; |
2379 | int err; |
2380 | |
2381 | if (args->size == 0) |
2382 | goto out; |
2383 | else if (args->size < sizeof(user_sseu)) |
2384 | return -EINVAL; |
2385 | |
2386 | if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), |
2387 | sizeof(user_sseu))) |
2388 | return -EFAULT; |
2389 | |
2390 | if (user_sseu.rsvd) |
2391 | return -EINVAL; |
2392 | |
2393 | if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) |
2394 | return -EINVAL; |
2395 | |
2396 | lookup = 0; |
2397 | if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) |
2398 | lookup |= LOOKUP_USER_INDEX; |
2399 | |
2400 | ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); |
2401 | if (IS_ERR(ce)) |
2402 | return PTR_ERR(ce); |
2403 | |
2404 | err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ |
2405 | if (err) { |
2406 | intel_context_put(ce); |
2407 | return err; |
2408 | } |
2409 | |
2410 | user_sseu.slice_mask = ce->sseu.slice_mask; |
2411 | user_sseu.subslice_mask = ce->sseu.subslice_mask; |
2412 | user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; |
2413 | user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; |
2414 | |
2415 | intel_context_unlock_pinned(ce); |
2416 | intel_context_put(ce); |
2417 | |
2418 | if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, |
2419 | sizeof(user_sseu))) |
2420 | return -EFAULT; |
2421 | |
2422 | out: |
2423 | args->size = sizeof(user_sseu); |
2424 | |
2425 | return 0; |
2426 | } |
2427 | |
2428 | int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, |
2429 | struct drm_file *file) |
2430 | { |
2431 | struct drm_i915_file_private *file_priv = file->driver_priv; |
2432 | struct drm_i915_gem_context_param *args = data; |
2433 | struct i915_gem_context *ctx; |
2434 | struct i915_address_space *vm; |
2435 | int ret = 0; |
2436 | |
2437 | ctx = i915_gem_context_lookup(file_priv, args->ctx_id); |
2438 | if (IS_ERR(ctx)) |
2439 | return PTR_ERR(ctx); |
2440 | |
2441 | switch (args->param) { |
2442 | case I915_CONTEXT_PARAM_GTT_SIZE: |
2443 | args->size = 0; |
2444 | vm = i915_gem_context_get_eb_vm(ctx); |
2445 | args->value = vm->total; |
2446 | i915_vm_put(vm); |
2447 | |
2448 | break; |
2449 | |
2450 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: |
2451 | args->size = 0; |
2452 | args->value = i915_gem_context_no_error_capture(ctx); |
2453 | break; |
2454 | |
2455 | case I915_CONTEXT_PARAM_BANNABLE: |
2456 | args->size = 0; |
2457 | args->value = i915_gem_context_is_bannable(ctx); |
2458 | break; |
2459 | |
2460 | case I915_CONTEXT_PARAM_RECOVERABLE: |
2461 | args->size = 0; |
2462 | args->value = i915_gem_context_is_recoverable(ctx); |
2463 | break; |
2464 | |
2465 | case I915_CONTEXT_PARAM_PRIORITY: |
2466 | args->size = 0; |
2467 | args->value = ctx->sched.priority; |
2468 | break; |
2469 | |
2470 | case I915_CONTEXT_PARAM_SSEU: |
2471 | ret = get_sseu(ctx, args); |
2472 | break; |
2473 | |
2474 | case I915_CONTEXT_PARAM_VM: |
2475 | ret = get_ppgtt(file_priv, ctx, args); |
2476 | break; |
2477 | |
2478 | case I915_CONTEXT_PARAM_PERSISTENCE: |
2479 | args->size = 0; |
2480 | args->value = i915_gem_context_is_persistent(ctx); |
2481 | break; |
2482 | |
2483 | case I915_CONTEXT_PARAM_PROTECTED_CONTENT: |
2484 | ret = get_protected(ctx, args); |
2485 | break; |
2486 | |
2487 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
2488 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
2489 | case I915_CONTEXT_PARAM_ENGINES: |
2490 | case I915_CONTEXT_PARAM_RINGSIZE: |
2491 | default: |
2492 | ret = -EINVAL; |
2493 | break; |
2494 | } |
2495 | |
2496 | i915_gem_context_put(ctx); |
2497 | return ret; |
2498 | } |
2499 | |
2500 | int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, |
2501 | struct drm_file *file) |
2502 | { |
2503 | struct drm_i915_file_private *file_priv = file->driver_priv; |
2504 | struct drm_i915_gem_context_param *args = data; |
2505 | struct i915_gem_proto_context *pc; |
2506 | struct i915_gem_context *ctx; |
2507 | int ret = 0; |
2508 | |
2509 | mutex_lock(&file_priv->proto_context_lock); |
2510 | ctx = __context_lookup(file_priv, args->ctx_id); |
2511 | if (!ctx) { |
2512 | pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); |
2513 | if (pc) { |
2514 | /* Contexts should be finalized inside |
2515 | * GEM_CONTEXT_CREATE starting with graphics |
2516 | * version 13. |
2517 | */ |
2518 | WARN_ON(GRAPHICS_VER(file_priv->i915) > 12); |
2519 | ret = set_proto_ctx_param(file_priv, pc, args); |
2520 | } else { |
2521 | ret = -ENOENT; |
2522 | } |
2523 | } |
2524 | mutex_unlock(&file_priv->proto_context_lock); |
2525 | |
2526 | if (ctx) { |
2527 | ret = ctx_setparam(file_priv, ctx, args); |
2528 | i915_gem_context_put(ctx); |
2529 | } |
2530 | |
2531 | return ret; |
2532 | } |
2533 | |
2534 | int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, |
2535 | void *data, struct drm_file *file) |
2536 | { |
2537 | struct drm_i915_private *i915 = to_i915(dev); |
2538 | struct drm_i915_reset_stats *args = data; |
2539 | struct i915_gem_context *ctx; |
2540 | |
2541 | if (args->flags || args->pad) |
2542 | return -EINVAL; |
2543 | |
2544 | ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); |
2545 | if (IS_ERR(ctx)) |
2546 | return PTR_ERR(ctx); |
2547 | |
2548 | /* |
2549 | * We opt for unserialised reads here. This may result in tearing |
2550 | * in the extremely unlikely event of a GPU hang on this context |
2551 | * as we are querying them. If we need that extra layer of protection, |
2552 | * we should wrap the hangstats with a seqlock. |
2553 | */ |
2554 | |
2555 | if (capable(CAP_SYS_ADMIN)) |
2556 | args->reset_count = i915_reset_count(&i915->gpu_error); |
2557 | else |
2558 | args->reset_count = 0; |
2559 | |
2560 | args->batch_active = atomic_read(&ctx->guilty_count); |
2561 | args->batch_pending = atomic_read(&ctx->active_count); |
2562 | |
2563 | i915_gem_context_put(ctx); |
2564 | return 0; |
2565 | } |
2566 | |
2567 | /* GEM context-engines iterator: for_each_gem_engine() */ |
2568 | struct intel_context * |
2569 | i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) |
2570 | { |
2571 | const struct i915_gem_engines *e = it->engines; |
2572 | struct intel_context *ctx; |
2573 | |
2574 | if (unlikely(!e)) |
2575 | return NULL; |
2576 | |
2577 | do { |
2578 | if (it->idx >= e->num_engines) |
2579 | return NULL; |
2580 | |
2581 | ctx = e->engines[it->idx++]; |
2582 | } while (!ctx); |
2583 | |
2584 | return ctx; |
2585 | } |
2586 | |
2587 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
2588 | #include "selftests/mock_context.c" |
2589 | #include "selftests/i915_gem_context.c" |
2590 | #endif |
2591 | |
2592 | void i915_gem_context_module_exit(void) |
2593 | { |
2594 | kmem_cache_destroy(slab_luts); |
2595 | } |
2596 | |
2597 | int __init i915_gem_context_module_init(void) |
2598 | { |
2599 | slab_luts = KMEM_CACHE(i915_lut_handle, 0); |
2600 | if (!slab_luts) |
2601 | return -ENOMEM; |
2602 | |
2603 | return 0; |
2604 | } |
2605 | |