1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include <linux/kthread.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/completion.h> |
27 | |
28 | #include <drm/drm_print.h> |
29 | #include <drm/gpu_scheduler.h> |
30 | |
31 | #include "gpu_scheduler_trace.h" |
32 | |
33 | #define to_drm_sched_job(sched_job) \ |
34 | container_of((sched_job), struct drm_sched_job, queue_node) |
35 | |
36 | /** |
 * drm_sched_entity_init - Init a context entity used by the scheduler to
 * submit jobs to a HW ring.
39 | * |
40 | * @entity: scheduler entity to init |
41 | * @priority: priority of the entity |
42 | * @sched_list: the list of drm scheds on which jobs from this |
43 | * entity can be submitted |
44 | * @num_sched_list: number of drm sched in sched_list |
45 | * @guilty: atomic_t set to 1 when a job on this queue |
46 | * is found to be guilty causing a timeout |
47 | * |
48 | * Note that the &sched_list must have at least one element to schedule the entity. |
49 | * |
50 | * For changing @priority later on at runtime see |
51 | * drm_sched_entity_set_priority(). For changing the set of schedulers |
52 | * @sched_list at runtime see drm_sched_entity_modify_sched(). |
53 | * |
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
55 | * drm_sched_entity_destroy(). |
56 | * |
57 | * Returns 0 on success or a negative error code on failure. |
58 | */ |
59 | int drm_sched_entity_init(struct drm_sched_entity *entity, |
60 | enum drm_sched_priority priority, |
61 | struct drm_gpu_scheduler **sched_list, |
62 | unsigned int num_sched_list, |
63 | atomic_t *guilty) |
64 | { |
65 | if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0]))) |
66 | return -EINVAL; |
67 | |
68 | memset(entity, 0, sizeof(struct drm_sched_entity)); |
	INIT_LIST_HEAD(&entity->list);
70 | entity->rq = NULL; |
71 | entity->guilty = guilty; |
72 | entity->num_sched_list = num_sched_list; |
73 | entity->priority = priority; |
74 | /* |
75 | * It's perfectly valid to initialize an entity without having a valid |
76 | * scheduler attached. It's just not valid to use the scheduler before it |
77 | * is initialized itself. |
78 | */ |
79 | entity->sched_list = num_sched_list > 1 ? sched_list : NULL; |
80 | RCU_INIT_POINTER(entity->last_scheduled, NULL); |
81 | RB_CLEAR_NODE(&entity->rb_tree_node); |
82 | |
83 | if (num_sched_list && !sched_list[0]->sched_rq) { |
		/* Every entry covered by num_sched_list should be
		 * non-NULL, so warn drivers here and tell them to fix
		 * their DRM calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
89 | } else if (num_sched_list) { |
90 | /* The "priority" of an entity cannot exceed the number of run-queues of a |
91 | * scheduler. Protect against num_rqs being 0, by converting to signed. Choose |
92 | * the lowest priority available. |
93 | */ |
94 | if (entity->priority >= sched_list[0]->num_rqs) { |
			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
				entity->priority, sched_list[0]->num_rqs);
97 | entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, |
98 | (s32) DRM_SCHED_PRIORITY_KERNEL); |
99 | } |
100 | entity->rq = sched_list[0]->sched_rq[entity->priority]; |
101 | } |
102 | |
	init_completion(&entity->entity_idle);
104 | |
105 | /* We start in an idle state. */ |
106 | complete_all(&entity->entity_idle); |
107 | |
108 | spin_lock_init(&entity->rq_lock); |
	spsc_queue_init(&entity->job_queue);
110 | |
	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);
113 | |
114 | return 0; |
115 | } |
116 | EXPORT_SYMBOL(drm_sched_entity_init); |
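
/*
 * A minimal usage sketch for drm_sched_entity_init(); the scheduler
 * instance, the priority and the surrounding driver code are illustrative,
 * not part of this file. With a single scheduler in the list the entity
 * stays on that scheduler for its whole lifetime:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_ring_sched };
 *	struct drm_sched_entity entity;
 *	int r;
 *
 *	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */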
117 | |
118 | /** |
119 | * drm_sched_entity_modify_sched - Modify sched of an entity |
 * @entity: scheduler entity to modify
121 | * @sched_list: the list of new drm scheds which will replace |
122 | * existing entity->sched_list |
123 | * @num_sched_list: number of drm sched in sched_list |
124 | * |
125 | * Note that this must be called under the same common lock for @entity as |
126 | * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to |
127 | * guarantee through some other means that this is never called while new jobs |
128 | * can be pushed to @entity. |
129 | */ |
130 | void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, |
131 | struct drm_gpu_scheduler **sched_list, |
132 | unsigned int num_sched_list) |
133 | { |
134 | WARN_ON(!num_sched_list || !sched_list); |
135 | |
136 | entity->sched_list = sched_list; |
137 | entity->num_sched_list = num_sched_list; |
138 | } |
139 | EXPORT_SYMBOL(drm_sched_entity_modify_sched); |
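
/*
 * Hedged sketch of the calling convention documented above; the mutex and
 * the context structure are hypothetical. The same lock must also
 * serialize drm_sched_job_arm()/drm_sched_entity_push_job() for this
 * entity:
 *
 *	mutex_lock(&ctx->entity_lock);
 *	drm_sched_entity_modify_sched(&ctx->entity, new_sched_list,
 *				      num_new_scheds);
 *	mutex_unlock(&ctx->entity_lock);
 */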
140 | |
141 | static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) |
142 | { |
143 | rmb(); /* for list_empty to work without lock */ |
144 | |
	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
147 | entity->stopped) |
148 | return true; |
149 | |
150 | return false; |
151 | } |
152 | |
153 | /* Return true if entity could provide a job. */ |
154 | bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) |
155 | { |
	if (spsc_queue_peek(&entity->job_queue) == NULL)
157 | return false; |
158 | |
159 | if (READ_ONCE(entity->dependency)) |
160 | return false; |
161 | |
162 | return true; |
163 | } |
164 | |
165 | /** |
166 | * drm_sched_entity_error - return error of last scheduled job |
167 | * @entity: scheduler entity to check |
168 | * |
169 | * Opportunistically return the error of the last scheduled job. Result can |
170 | * change any time when new jobs are pushed to the hw. |
171 | */ |
172 | int drm_sched_entity_error(struct drm_sched_entity *entity) |
173 | { |
174 | struct dma_fence *fence; |
175 | int r; |
176 | |
177 | rcu_read_lock(); |
178 | fence = rcu_dereference(entity->last_scheduled); |
179 | r = fence ? fence->error : 0; |
180 | rcu_read_unlock(); |
181 | |
182 | return r; |
183 | } |
184 | EXPORT_SYMBOL(drm_sched_entity_error); |
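
/*
 * Illustrative (not prescriptive) use of drm_sched_entity_error(): drivers
 * commonly refuse new submissions once a context has faulted. The context
 * structure and the error code are placeholders:
 *
 *	if (drm_sched_entity_error(&ctx->entity))
 *		return -EIO;
 */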
185 | |
186 | static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) |
187 | { |
188 | struct drm_sched_job *job = container_of(wrk, typeof(*job), work); |
189 | |
	drm_sched_fence_finished(job->s_fence, -ESRCH);
191 | WARN_ON(job->s_fence->parent); |
192 | job->sched->ops->free_job(job); |
193 | } |
194 | |
195 | /* Signal the scheduler finished fence when the entity in question is killed. */ |
196 | static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, |
197 | struct dma_fence_cb *cb) |
198 | { |
199 | struct drm_sched_job *job = container_of(cb, struct drm_sched_job, |
200 | finish_cb); |
201 | unsigned long index; |
202 | |
	dma_fence_put(f);
204 | |
205 | /* Wait for all dependencies to avoid data corruptions */ |
206 | xa_for_each(&job->dependencies, index, f) { |
207 | struct drm_sched_fence *s_fence = to_drm_sched_fence(f); |
208 | |
209 | if (s_fence && f == &s_fence->scheduled) { |
210 | /* The dependencies array had a reference on the scheduled |
211 | * fence, and the finished fence refcount might have |
212 | * dropped to zero. Use dma_fence_get_rcu() so we get |
213 | * a NULL fence in that case. |
214 | */ |
			f = dma_fence_get_rcu(&s_fence->finished);
216 | |
217 | /* Now that we have a reference on the finished fence, |
218 | * we can release the reference the dependencies array |
219 | * had on the scheduled fence. |
220 | */ |
			dma_fence_put(&s_fence->scheduled);
222 | } |
223 | |
224 | xa_erase(&job->dependencies, index); |
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
227 | return; |
228 | |
		dma_fence_put(f);
230 | } |
231 | |
232 | INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); |
	schedule_work(&job->work);
234 | } |
235 | |
236 | /* Remove the entity from the scheduler and kill all pending jobs */ |
237 | static void drm_sched_entity_kill(struct drm_sched_entity *entity) |
238 | { |
239 | struct drm_sched_job *job; |
240 | struct dma_fence *prev; |
241 | |
242 | if (!entity->rq) |
243 | return; |
244 | |
	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);
249 | |
250 | /* Make sure this entity is not used by the scheduler at the moment */ |
251 | wait_for_completion(&entity->entity_idle); |
252 | |
253 | /* The entity is guaranteed to not be used by the scheduler */ |
254 | prev = rcu_dereference_check(entity->last_scheduled, true); |
	dma_fence_get(prev);
256 | while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { |
257 | struct drm_sched_fence *s_fence = job->s_fence; |
258 | |
		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
						    drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
263 | |
264 | prev = &s_fence->finished; |
265 | } |
	dma_fence_put(prev);
267 | } |
268 | |
269 | /** |
270 | * drm_sched_entity_flush - Flush a context entity |
271 | * |
272 | * @entity: scheduler entity |
 * @timeout: time in jiffies to wait for the job queue to become empty.
274 | * |
 * drm_sched_entity_fini() is split into two functions: this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
278 | * |
279 | * Returns the remaining time in jiffies left from the input timeout |
280 | */ |
281 | long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) |
282 | { |
283 | struct drm_gpu_scheduler *sched; |
284 | struct task_struct *last_user; |
285 | long ret = timeout; |
286 | |
287 | if (!entity->rq) |
288 | return 0; |
289 | |
290 | sched = entity->rq->sched; |
291 | /** |
292 | * The client will not queue more IBs during this fini, consume existing |
293 | * queued IBs or discard them on SIGKILL |
294 | */ |
295 | if (current->flags & PF_EXITING) { |
296 | if (timeout) |
297 | ret = wait_event_timeout( |
298 | sched->job_scheduled, |
299 | drm_sched_entity_is_idle(entity), |
300 | timeout); |
301 | } else { |
302 | wait_event_killable(sched->job_scheduled, |
303 | drm_sched_entity_is_idle(entity)); |
304 | } |
305 | |
306 | /* For killed process disable any more IBs enqueue right now */ |
307 | last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); |
308 | if ((!last_user || last_user == current->group_leader) && |
309 | (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) |
310 | drm_sched_entity_kill(entity); |
311 | |
312 | return ret; |
313 | } |
314 | EXPORT_SYMBOL(drm_sched_entity_flush); |
315 | |
316 | /** |
317 | * drm_sched_entity_fini - Destroy a context entity |
318 | * |
319 | * @entity: scheduler entity |
320 | * |
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
322 | * |
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
325 | * the entity and signals all jobs with an error code if the process was killed. |
326 | */ |
327 | void drm_sched_entity_fini(struct drm_sched_entity *entity) |
328 | { |
329 | /* |
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also make sure that the scheduler won't touch this entity
332 | * any more. |
333 | */ |
334 | drm_sched_entity_kill(entity); |
335 | |
336 | if (entity->dependency) { |
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
339 | entity->dependency = NULL; |
340 | } |
341 | |
342 | dma_fence_put(rcu_dereference_check(entity->last_scheduled, true)); |
343 | RCU_INIT_POINTER(entity->last_scheduled, NULL); |
344 | } |
345 | EXPORT_SYMBOL(drm_sched_entity_fini); |
346 | |
347 | /** |
348 | * drm_sched_entity_destroy - Destroy a context entity |
349 | * @entity: scheduler entity |
350 | * |
351 | * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a |
352 | * convenience wrapper. |
353 | */ |
354 | void drm_sched_entity_destroy(struct drm_sched_entity *entity) |
355 | { |
356 | drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); |
357 | drm_sched_entity_fini(entity); |
358 | } |
359 | EXPORT_SYMBOL(drm_sched_entity_destroy); |
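
/*
 * Teardown sketch: a driver that needs a custom flush timeout performs the
 * two steps itself, otherwise drm_sched_entity_destroy() is the usual
 * one-liner. The timeout value and the context structure are illustrative:
 *
 *	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));
 *	drm_sched_entity_fini(&ctx->entity);
 */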
360 | |
/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
362 | static void drm_sched_entity_clear_dep(struct dma_fence *f, |
363 | struct dma_fence_cb *cb) |
364 | { |
365 | struct drm_sched_entity *entity = |
366 | container_of(cb, struct drm_sched_entity, cb); |
367 | |
368 | entity->dependency = NULL; |
	dma_fence_put(f);
370 | } |
371 | |
372 | /* |
373 | * drm_sched_entity_clear_dep - callback to clear the entities dependency and |
374 | * wake up scheduler |
375 | */ |
376 | static void drm_sched_entity_wakeup(struct dma_fence *f, |
377 | struct dma_fence_cb *cb) |
378 | { |
379 | struct drm_sched_entity *entity = |
380 | container_of(cb, struct drm_sched_entity, cb); |
381 | |
382 | drm_sched_entity_clear_dep(f, cb); |
	drm_sched_wakeup(entity->rq->sched, entity);
384 | } |
385 | |
386 | /** |
387 | * drm_sched_entity_set_priority - Sets priority of the entity |
388 | * |
389 | * @entity: scheduler entity |
390 | * @priority: scheduler priority |
391 | * |
 * Update the priority used to select the entity's run queue.
393 | */ |
394 | void drm_sched_entity_set_priority(struct drm_sched_entity *entity, |
395 | enum drm_sched_priority priority) |
396 | { |
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
400 | } |
401 | EXPORT_SYMBOL(drm_sched_entity_set_priority); |
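
/*
 * Illustrative runtime priority change, e.g. in reaction to a userspace
 * request; the context structure is hypothetical and the priority is one
 * of the standard &enum drm_sched_priority levels:
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */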
402 | |
403 | /* |
404 | * Add a callback to the current dependency of the entity to wake up the |
405 | * scheduler when the entity becomes available. |
406 | */ |
407 | static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) |
408 | { |
409 | struct drm_gpu_scheduler *sched = entity->rq->sched; |
410 | struct dma_fence *fence = entity->dependency; |
411 | struct drm_sched_fence *s_fence; |
412 | |
413 | if (fence->context == entity->fence_context || |
414 | fence->context == entity->fence_context + 1) { |
415 | /* |
416 | * Fence is a scheduled/finished fence from a job |
417 | * which belongs to the same entity, we can ignore |
418 | * fences from ourself |
419 | */ |
		dma_fence_put(entity->dependency);
421 | return false; |
422 | } |
423 | |
	s_fence = to_drm_sched_fence(fence);
425 | if (!fence->error && s_fence && s_fence->sched == sched && |
426 | !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { |
427 | |
428 | /* |
429 | * Fence is from the same scheduler, only need to wait for |
430 | * it to be scheduled |
431 | */ |
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
437 | return true; |
438 | |
439 | /* Ignore it when it is already scheduled */ |
440 | dma_fence_put(fence); |
441 | return false; |
442 | } |
443 | |
	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
446 | return true; |
447 | |
	dma_fence_put(entity->dependency);
449 | return false; |
450 | } |
451 | |
452 | static struct dma_fence * |
453 | drm_sched_job_dependency(struct drm_sched_job *job, |
454 | struct drm_sched_entity *entity) |
455 | { |
456 | struct dma_fence *f; |
457 | |
458 | /* We keep the fence around, so we can iterate over all dependencies |
459 | * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled |
460 | * before killing the job. |
461 | */ |
	f = xa_load(&job->dependencies, job->last_dependency);
463 | if (f) { |
464 | job->last_dependency++; |
		return dma_fence_get(f);
466 | } |
467 | |
468 | if (job->sched->ops->prepare_job) |
469 | return job->sched->ops->prepare_job(job, entity); |
470 | |
471 | return NULL; |
472 | } |
473 | |
474 | struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) |
475 | { |
476 | struct drm_sched_job *sched_job; |
477 | |
478 | sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); |
479 | if (!sched_job) |
480 | return NULL; |
481 | |
482 | while ((entity->dependency = |
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
485 | |
486 | if (drm_sched_entity_add_dependency_cb(entity)) |
487 | return NULL; |
488 | } |
489 | |
	/* Skip jobs from an entity that has been marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
493 | |
494 | dma_fence_put(rcu_dereference_check(entity->last_scheduled, true)); |
495 | rcu_assign_pointer(entity->last_scheduled, |
496 | dma_fence_get(&sched_job->s_fence->finished)); |
497 | |
498 | /* |
499 | * If the queue is empty we allow drm_sched_entity_select_rq() to |
500 | * locklessly access ->last_scheduled. This only works if we set the |
	 * pointer before we dequeue and if we have a write barrier here.
502 | */ |
503 | smp_wmb(); |
504 | |
	spsc_queue_pop(&entity->job_queue);
506 | |
507 | /* |
508 | * Update the entity's location in the min heap according to |
509 | * the timestamp of the next job, if any. |
510 | */ |
511 | if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) { |
512 | struct drm_sched_job *next; |
513 | |
514 | next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); |
515 | if (next) |
			drm_sched_rq_update_fifo(entity, next->submit_ts);
517 | } |
518 | |
	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through this job.
522 | */ |
523 | sched_job->entity = NULL; |
524 | |
525 | return sched_job; |
526 | } |
527 | |
528 | void drm_sched_entity_select_rq(struct drm_sched_entity *entity) |
529 | { |
530 | struct dma_fence *fence; |
531 | struct drm_gpu_scheduler *sched; |
532 | struct drm_sched_rq *rq; |
533 | |
534 | /* single possible engine and already selected */ |
535 | if (!entity->sched_list) |
536 | return; |
537 | |
538 | /* queue non-empty, stay on the same engine */ |
	if (spsc_queue_count(&entity->job_queue))
540 | return; |
541 | |
542 | /* |
543 | * Only when the queue is empty are we guaranteed that the scheduler |
544 | * thread cannot change ->last_scheduled. To enforce ordering we need |
545 | * a read barrier here. See drm_sched_entity_pop_job() for the other |
546 | * side. |
547 | */ |
548 | smp_rmb(); |
549 | |
550 | fence = rcu_dereference_check(entity->last_scheduled, true); |
551 | |
552 | /* stay on the same engine if the previous job hasn't finished */ |
553 | if (fence && !dma_fence_is_signaled(fence)) |
554 | return; |
555 | |
	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);
564 | |
565 | if (entity->num_sched_list == 1) |
566 | entity->sched_list = NULL; |
567 | } |
568 | |
569 | /** |
570 | * drm_sched_entity_push_job - Submit a job to the entity's job queue |
571 | * @sched_job: job to submit |
572 | * |
573 | * Note: To guarantee that the order of insertion to queue matches the job's |
574 | * fence sequence number this function should be called with drm_sched_job_arm() |
575 | * under common lock for the struct drm_sched_entity that was set up for |
576 | * @sched_job in drm_sched_job_init(). |
577 | * |
578 | * Returns 0 for success, negative error code otherwise. |
579 | */ |
580 | void drm_sched_entity_push_job(struct drm_sched_job *sched_job) |
581 | { |
582 | struct drm_sched_entity *entity = sched_job->entity; |
583 | bool first; |
584 | ktime_t submit_ts; |
585 | |
586 | trace_drm_sched_job(sched_job, entity); |
	atomic_inc(entity->rq->sched->score);
588 | WRITE_ONCE(entity->last_user, current->group_leader); |
589 | |
590 | /* |
591 | * After the sched_job is pushed into the entity queue, it may be |
592 | * completed and freed up at any time. We can no longer access it. |
593 | * Make sure to set the submit_ts first, to avoid a race. |
594 | */ |
595 | sched_job->submit_ts = submit_ts = ktime_get(); |
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
597 | |
598 | /* first job wakes up scheduler */ |
599 | if (first) { |
600 | /* Add the entity to the run queue */ |
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup(entity->rq->sched, entity);
616 | } |
617 | } |
618 | EXPORT_SYMBOL(drm_sched_entity_push_job); |
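
/*
 * Submission sketch following the locking rule documented above. The job
 * container, its setup and the lock are driver-specific placeholders; only
 * the ordering of drm_sched_job_arm() and drm_sched_entity_push_job() under
 * one common lock is prescribed:
 *
 *	mutex_lock(&ctx->entity_lock);
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&ctx->entity_lock);
 */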
619 | |