1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #ifndef _DRM_GPU_SCHEDULER_H_ |
25 | #define _DRM_GPU_SCHEDULER_H_ |
26 | |
27 | #include <drm/spsc_queue.h> |
28 | #include <linux/dma-fence.h> |
29 | #include <linux/completion.h> |
30 | #include <linux/xarray.h> |
31 | #include <linux/workqueue.h> |
32 | |
33 | #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) |
34 | |
35 | /** |
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
37 | * |
38 | * Setting this flag on a scheduler fence prevents pipelining of jobs depending |
 * on this fence. In other words, we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
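 *
 * The flag lives in &dma_fence.flags, so it can be set with set_bit(),
 * e.g. on a job's finished fence (a minimal sketch)::
 *
 *    set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);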
41 | */ |
42 | #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS |
43 | |
44 | /** |
45 | * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set |
46 | * |
 * Because a deadline hint can be set before the backing hw fence is
 * created, we need to keep track of whether a deadline has already been
 * set.
50 | */ |
51 | #define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1) |
52 | |
53 | enum dma_resv_usage; |
54 | struct dma_resv; |
55 | struct drm_gem_object; |
56 | |
57 | struct drm_gpu_scheduler; |
58 | struct drm_sched_rq; |
59 | |
60 | struct drm_file; |
61 | |
62 | /* These are often used as an (initial) index |
63 | * to an array, and as such should start at 0. |
64 | */ |
65 | enum drm_sched_priority { |
66 | DRM_SCHED_PRIORITY_MIN, |
67 | DRM_SCHED_PRIORITY_NORMAL, |
68 | DRM_SCHED_PRIORITY_HIGH, |
69 | DRM_SCHED_PRIORITY_KERNEL, |
70 | |
71 | DRM_SCHED_PRIORITY_COUNT |
72 | }; |
73 | |
/* Used to choose between FIFO and RR job scheduling */
75 | extern int drm_sched_policy; |
76 | |
77 | #define DRM_SCHED_POLICY_RR 0 |
78 | #define DRM_SCHED_POLICY_FIFO 1 |
79 | |
80 | /** |
81 | * struct drm_sched_entity - A wrapper around a job queue (typically |
82 | * attached to the DRM file_priv). |
83 | * |
84 | * Entities will emit jobs in order to their corresponding hardware |
85 | * ring, and the scheduler will alternate between entities based on |
86 | * scheduling policy. |
87 | */ |
88 | struct drm_sched_entity { |
89 | /** |
90 | * @list: |
91 | * |
92 | * Used to append this struct to the list of entities in the runqueue |
93 | * @rq under &drm_sched_rq.entities. |
94 | * |
95 | * Protected by &drm_sched_rq.lock of @rq. |
96 | */ |
97 | struct list_head list; |
98 | |
99 | /** |
100 | * @rq: |
101 | * |
102 | * Runqueue on which this entity is currently scheduled. |
103 | * |
104 | * FIXME: Locking is very unclear for this. Writers are protected by |
105 | * @rq_lock, but readers are generally lockless and seem to just race |
106 | * with not even a READ_ONCE. |
107 | */ |
108 | struct drm_sched_rq *rq; |
109 | |
110 | /** |
111 | * @sched_list: |
112 | * |
113 | * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can |
114 | * be scheduled on any scheduler on this list. |
115 | * |
116 | * This can be modified by calling drm_sched_entity_modify_sched(). |
117 | * Locking is entirely up to the driver, see the above function for more |
118 | * details. |
119 | * |
120 | * This will be set to NULL if &num_sched_list equals 1 and @rq has been |
121 | * set already. |
122 | * |
123 | * FIXME: This means priority changes through |
124 | * drm_sched_entity_set_priority() will be lost henceforth in this case. |
125 | */ |
126 | struct drm_gpu_scheduler **sched_list; |
127 | |
128 | /** |
129 | * @num_sched_list: |
130 | * |
131 | * Number of drm_gpu_schedulers in the @sched_list. |
132 | */ |
133 | unsigned int num_sched_list; |
134 | |
135 | /** |
136 | * @priority: |
137 | * |
138 | * Priority of the entity. This can be modified by calling |
139 | * drm_sched_entity_set_priority(). Protected by &rq_lock. |
140 | */ |
141 | enum drm_sched_priority priority; |
142 | |
143 | /** |
144 | * @rq_lock: |
145 | * |
146 | * Lock to modify the runqueue to which this entity belongs. |
147 | */ |
148 | spinlock_t rq_lock; |
149 | |
150 | /** |
151 | * @job_queue: the list of jobs of this entity. |
152 | */ |
153 | struct spsc_queue job_queue; |
154 | |
155 | /** |
156 | * @fence_seq: |
157 | * |
158 | * A linearly increasing seqno incremented with each new |
159 | * &drm_sched_fence which is part of the entity. |
160 | * |
161 | * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking, |
162 | * this doesn't need to be atomic. |
163 | */ |
164 | atomic_t fence_seq; |
165 | |
166 | /** |
167 | * @fence_context: |
168 | * |
169 | * A unique context for all the fences which belong to this entity. The |
170 | * &drm_sched_fence.scheduled uses the fence_context but |
171 | * &drm_sched_fence.finished uses fence_context + 1. |
172 | */ |
173 | uint64_t fence_context; |
174 | |
175 | /** |
176 | * @dependency: |
177 | * |
178 | * The dependency fence of the job which is on the top of the job queue. |
179 | */ |
180 | struct dma_fence *dependency; |
181 | |
182 | /** |
183 | * @cb: |
184 | * |
185 | * Callback for the dependency fence above. |
186 | */ |
187 | struct dma_fence_cb cb; |
188 | |
189 | /** |
190 | * @guilty: |
191 | * |
	 * Points to a driver-supplied flag, possibly shared between entities
	 * of the same context, which the scheduler sets once a job from this
	 * entity exceeds the hang limit.
193 | */ |
194 | atomic_t *guilty; |
195 | |
196 | /** |
197 | * @last_scheduled: |
198 | * |
199 | * Points to the finished fence of the last scheduled job. Only written |
200 | * by the scheduler thread, can be accessed locklessly from |
201 | * drm_sched_job_arm() iff the queue is empty. |
202 | */ |
203 | struct dma_fence __rcu *last_scheduled; |
204 | |
205 | /** |
206 | * @last_user: last group leader pushing a job into the entity. |
207 | */ |
208 | struct task_struct *last_user; |
209 | |
210 | /** |
211 | * @stopped: |
212 | * |
	 * Marks the entity as removed from the rq and destined for
214 | * termination. This is set by calling drm_sched_entity_flush() and by |
215 | * drm_sched_fini(). |
216 | */ |
217 | bool stopped; |
218 | |
219 | /** |
220 | * @entity_idle: |
221 | * |
	 * Signals when the entity is not in use, used to sequence entity cleanup in
223 | * drm_sched_entity_fini(). |
224 | */ |
225 | struct completion entity_idle; |
226 | |
227 | /** |
228 | * @oldest_job_waiting: |
229 | * |
	 * Timestamp of the earliest job waiting in the software queue, used
	 * for FIFO scheduling.
231 | */ |
232 | ktime_t oldest_job_waiting; |
233 | |
234 | /** |
235 | * @rb_tree_node: |
236 | * |
	 * The node used to insert this entity into the time-based priority
	 * queue.
	 */
	struct rb_node rb_tree_node;
};
242 | |
243 | /** |
244 | * struct drm_sched_rq - queue of entities to be scheduled. |
245 | * |
246 | * @lock: to modify the entities list. |
 * @sched: the scheduler to which this rq belongs.
248 | * @entities: list of the entities to be scheduled. |
249 | * @current_entity: the entity which is to be scheduled. |
 * @rb_tree_root: root of the time-based priority queue of entities for FIFO scheduling
251 | * |
252 | * Run queue is a set of entities scheduling command submissions for |
253 | * one specific ring. It implements the scheduling policy that selects |
254 | * the next entity to emit commands from. |
255 | */ |
256 | struct drm_sched_rq { |
257 | spinlock_t lock; |
258 | struct drm_gpu_scheduler *sched; |
259 | struct list_head entities; |
260 | struct drm_sched_entity *current_entity; |
261 | struct rb_root_cached rb_tree_root; |
262 | }; |
263 | |
264 | /** |
265 | * struct drm_sched_fence - fences corresponding to the scheduling of a job. |
266 | */ |
267 | struct drm_sched_fence { |
268 | /** |
269 | * @scheduled: this fence is what will be signaled by the scheduler |
270 | * when the job is scheduled. |
271 | */ |
272 | struct dma_fence scheduled; |
273 | |
274 | /** |
275 | * @finished: this fence is what will be signaled by the scheduler |
276 | * when the job is completed. |
277 | * |
278 | * When setting up an out fence for the job, you should use |
279 | * this, since it's available immediately upon |
280 | * drm_sched_job_init(), and the fence returned by the driver |
281 | * from run_job() won't be created until the dependencies have |
282 | * resolved. |
283 | */ |
284 | struct dma_fence finished; |
285 | |
286 | /** |
287 | * @deadline: deadline set on &drm_sched_fence.finished which |
288 | * potentially needs to be propagated to &drm_sched_fence.parent |
289 | */ |
290 | ktime_t deadline; |
291 | |
292 | /** |
293 | * @parent: the fence returned by &drm_sched_backend_ops.run_job |
294 | * when scheduling the job on hardware. We signal the |
295 | * &drm_sched_fence.finished fence once parent is signalled. |
296 | */ |
297 | struct dma_fence *parent; |
298 | /** |
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
301 | */ |
302 | struct drm_gpu_scheduler *sched; |
303 | /** |
304 | * @lock: the lock used by the scheduled and the finished fences. |
305 | */ |
306 | spinlock_t lock; |
307 | /** |
308 | * @owner: job owner for debugging |
309 | */ |
310 | void *owner; |
311 | }; |
312 | |
313 | struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); |
314 | |
315 | /** |
316 | * struct drm_sched_job - A job to be run by an entity. |
317 | * |
318 | * @queue_node: used to append this struct to the queue of jobs in an entity. |
 * @list: list node used to add this job to the scheduler's pending list.
320 | * @sched: the scheduler instance on which this job is scheduled. |
321 | * @s_fence: contains the fences for the scheduling of job. |
322 | * @finish_cb: the callback for the finished fence. |
 * @work: Helper to reschedule the job kill to a different context.
324 | * @id: a unique id assigned to each job scheduled on the scheduler. |
325 | * @karma: increment on every hang caused by this job. If this exceeds the hang |
326 | * limit of the scheduler then the job is marked guilty and will not |
327 | * be scheduled further. |
328 | * @s_priority: the priority of the job. |
329 | * @entity: the entity to which this job belongs. |
330 | * @cb: the callback for the parent fence in s_fence. |
331 | * |
332 | * A job is created by the driver using drm_sched_job_init(), and |
333 | * should call drm_sched_entity_push_job() once it wants the scheduler |
334 | * to schedule the job. |
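 *
 * A minimal submission sketch, assuming a driver job struct that embeds
 * &struct drm_sched_job as @base (error handling and dependency setup
 * omitted)::
 *
 *    ret = drm_sched_job_init(&job->base, entity, owner);
 *    if (ret)
 *        return ret;
 *
 *    drm_sched_job_arm(&job->base);
 *    drm_sched_entity_push_job(&job->base);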
335 | */ |
336 | struct drm_sched_job { |
337 | struct spsc_node queue_node; |
338 | struct list_head list; |
339 | struct drm_gpu_scheduler *sched; |
340 | struct drm_sched_fence *s_fence; |
341 | |
342 | /* |
343 | * work is used only after finish_cb has been used and will not be |
344 | * accessed anymore. |
345 | */ |
346 | union { |
347 | struct dma_fence_cb finish_cb; |
348 | struct work_struct work; |
349 | }; |
350 | |
351 | uint64_t id; |
352 | atomic_t karma; |
353 | enum drm_sched_priority s_priority; |
354 | struct drm_sched_entity *entity; |
355 | struct dma_fence_cb cb; |
356 | /** |
357 | * @dependencies: |
358 | * |
359 | * Contains the dependencies as struct dma_fence for this job, see |
360 | * drm_sched_job_add_dependency() and |
361 | * drm_sched_job_add_implicit_dependencies(). |
362 | */ |
363 | struct xarray dependencies; |
364 | |
365 | /** @last_dependency: tracks @dependencies as they signal */ |
366 | unsigned long last_dependency; |
367 | |
368 | /** |
369 | * @submit_ts: |
370 | * |
371 | * When the job was pushed into the entity queue. |
372 | */ |
373 | ktime_t submit_ts; |
374 | }; |
375 | |
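/**
 * drm_sched_invalidate_job - increment a job's karma and check the hang limit
 * @s_job: the job in question
 * @threshold: the scheduler's hang limit
 *
 * Returns true if @s_job exists and its karma, after being incremented,
 * exceeds @threshold.
 */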
376 | static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, |
377 | int threshold) |
378 | { |
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
380 | } |
381 | |
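/**
 * enum drm_gpu_sched_stat - the scheduler's status
 *
 * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
 * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
 * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
 */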
382 | enum drm_gpu_sched_stat { |
383 | DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ |
384 | DRM_GPU_SCHED_STAT_NOMINAL, |
385 | DRM_GPU_SCHED_STAT_ENODEV, |
386 | }; |
387 | |
388 | /** |
389 | * struct drm_sched_backend_ops - Define the backend operations |
390 | * called by the scheduler |
391 | * |
 * These functions should be implemented by the driver.
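 *
 * A driver typically collects them in a static ops table (a sketch with
 * hypothetical callback names)::
 *
 *    static const struct drm_sched_backend_ops my_sched_ops = {
 *        .prepare_job  = my_prepare_job,
 *        .run_job      = my_run_job,
 *        .timedout_job = my_timedout_job,
 *        .free_job     = my_free_job,
 *    };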
393 | */ |
394 | struct drm_sched_backend_ops { |
395 | /** |
396 | * @prepare_job: |
397 | * |
398 | * Called when the scheduler is considering scheduling this job next, to |
399 | * get another struct dma_fence for this job to block on. Once it |
400 | * returns NULL, run_job() may be called. |
401 | * |
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
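	 *
	 * A sketch of a hook that returns one extra fence to wait for (NULL
	 * once nothing is left), with my_job_get_extra_fence() standing in
	 * for driver-specific logic::
	 *
	 *    static struct dma_fence *
	 *    my_prepare_job(struct drm_sched_job *sched_job,
	 *                   struct drm_sched_entity *s_entity)
	 *    {
	 *        return my_job_get_extra_fence(sched_job);
	 *    }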
404 | */ |
405 | struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job, |
406 | struct drm_sched_entity *s_entity); |
407 | |
408 | /** |
409 | * @run_job: Called to execute the job once all of the dependencies |
410 | * have been resolved. This may be called multiple times, if |
	 * timedout_job() has happened and drm_sched_resubmit_jobs()
412 | * decides to try it again. |
413 | */ |
414 | struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); |
415 | |
416 | /** |
417 | * @timedout_job: Called when a job has taken too long to execute, |
418 | * to trigger GPU recovery. |
419 | * |
420 | * This method is called in a workqueue context. |
421 | * |
422 | * Drivers typically issue a reset to recover from GPU hangs, and this |
423 | * procedure usually follows the following workflow: |
424 | * |
425 | * 1. Stop the scheduler using drm_sched_stop(). This will park the |
426 | * scheduler thread and cancel the timeout work, guaranteeing that |
427 | * nothing is queued while we reset the hardware queue |
428 | * 2. Try to gracefully stop non-faulty jobs (optional) |
429 | * 3. Issue a GPU reset (driver-specific) |
430 | * 4. Re-submit jobs using drm_sched_resubmit_jobs() |
431 | * 5. Restart the scheduler using drm_sched_start(). At that point, new |
432 | * jobs can be queued, and the scheduler thread is unblocked |
433 | * |
434 | * Note that some GPUs have distinct hardware queues but need to reset |
435 | * the GPU globally, which requires extra synchronization between the |
436 | * timeout handler of the different &drm_gpu_scheduler. One way to |
437 | * achieve this synchronization is to create an ordered workqueue |
438 | * (using alloc_ordered_workqueue()) at the driver level, and pass this |
439 | * queue to drm_sched_init(), to guarantee that timeout handlers are |
440 | * executed sequentially. The above workflow needs to be slightly |
441 | * adjusted in that case: |
442 | * |
443 | * 1. Stop all schedulers impacted by the reset using drm_sched_stop() |
444 | * 2. Try to gracefully stop non-faulty jobs on all queues impacted by |
445 | * the reset (optional) |
446 | * 3. Issue a GPU reset on all faulty queues (driver-specific) |
447 | * 4. Re-submit jobs on all schedulers impacted by the reset using |
448 | * drm_sched_resubmit_jobs() |
449 | * 5. Restart all schedulers that were stopped in step #1 using |
450 | * drm_sched_start() |
451 | * |
	 * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
	 * underlying driver has started or completed recovery.
454 | * |
	 * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
	 * available, i.e. it has been unplugged.
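	 *
	 * A minimal recovery sketch following the first workflow above, with
	 * my_hw_reset() standing in for the driver-specific reset::
	 *
	 *    static enum drm_gpu_sched_stat
	 *    my_timedout_job(struct drm_sched_job *sched_job)
	 *    {
	 *        struct drm_gpu_scheduler *sched = sched_job->sched;
	 *
	 *        drm_sched_stop(sched, sched_job);
	 *        my_hw_reset(sched);
	 *        drm_sched_resubmit_jobs(sched);
	 *        drm_sched_start(sched, true);
	 *        return DRM_GPU_SCHED_STAT_NOMINAL;
	 *    }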
457 | */ |
458 | enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); |
459 | |
460 | /** |
461 | * @free_job: Called once the job's finished fence has been signaled |
462 | * and it's time to clean it up. |
463 | */ |
464 | void (*free_job)(struct drm_sched_job *sched_job); |
465 | }; |
466 | |
467 | /** |
468 | * struct drm_gpu_scheduler - scheduler instance-specific data |
469 | * |
470 | * @ops: backend operations provided by the driver. |
471 | * @hw_submission_limit: the max size of the hardware queue. |
472 | * @timeout: the time after which a job is removed from the scheduler. |
473 | * @name: name of the ring for which this scheduler is being used. |
474 | * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, |
475 | * as there's usually one run-queue per priority, but could be less. |
 * @sched_rq: An allocated array of run-queues of size @num_rqs.
477 | * @wake_up_worker: the wait queue on which the scheduler sleeps until a job |
478 | * is ready to be scheduled. |
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
480 | * waits on this wait queue until all the scheduled jobs are |
481 | * finished. |
482 | * @hw_rq_count: the number of jobs currently in the hardware queue. |
 * @job_id_count: used to assign a unique id to each job.
484 | * @timeout_wq: workqueue used to queue @work_tdr |
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
486 | * timeout interval is over. |
 * @thread: the kthread on which the scheduler runs.
488 | * @pending_list: the list of jobs which are currently in the job queue. |
489 | * @job_list_lock: lock to protect the pending_list. |
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *             the job is marked guilty and will no longer be considered for
 *             scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
493 | * @_score: score used when the driver doesn't provide one |
494 | * @ready: marks if the underlying HW is ready to work |
 * @free_guilty: A hint to the timeout handler to free the guilty job.
496 | * @dev: system &struct device |
497 | * |
498 | * One scheduler is implemented for each hardware ring. |
499 | */ |
500 | struct drm_gpu_scheduler { |
501 | const struct drm_sched_backend_ops *ops; |
502 | uint32_t hw_submission_limit; |
503 | long timeout; |
504 | const char *name; |
505 | u32 num_rqs; |
506 | struct drm_sched_rq **sched_rq; |
507 | wait_queue_head_t wake_up_worker; |
508 | wait_queue_head_t job_scheduled; |
509 | atomic_t hw_rq_count; |
510 | atomic64_t job_id_count; |
511 | struct workqueue_struct *timeout_wq; |
512 | struct delayed_work work_tdr; |
513 | struct task_struct *thread; |
514 | struct list_head pending_list; |
515 | spinlock_t job_list_lock; |
516 | int hang_limit; |
517 | atomic_t *score; |
518 | atomic_t _score; |
519 | bool ready; |
520 | bool free_guilty; |
521 | struct device *dev; |
522 | }; |
523 | |
524 | int drm_sched_init(struct drm_gpu_scheduler *sched, |
525 | const struct drm_sched_backend_ops *ops, |
526 | u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit, |
527 | long timeout, struct workqueue_struct *timeout_wq, |
528 | atomic_t *score, const char *name, struct device *dev); |
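
/*
 * A minimal init sketch with hypothetical names and values: one run-queue
 * per priority, a hw queue depth of 64, a hang limit of 3 and a 2s timeout,
 * using the system workqueue (timeout_wq == NULL) and the scheduler's
 * internal score (score == NULL):
 *
 *    ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *                         DRM_SCHED_PRIORITY_COUNT, 64, 3,
 *                         msecs_to_jiffies(2000), NULL, NULL,
 *                         "my-ring", dev);
 */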
529 | |
530 | void drm_sched_fini(struct drm_gpu_scheduler *sched); |
531 | int drm_sched_job_init(struct drm_sched_job *job, |
532 | struct drm_sched_entity *entity, |
533 | void *owner); |
534 | void drm_sched_job_arm(struct drm_sched_job *job); |
535 | int drm_sched_job_add_dependency(struct drm_sched_job *job, |
536 | struct dma_fence *fence); |
537 | int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, |
538 | struct drm_file *file, |
539 | u32 handle, |
540 | u32 point); |
541 | int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, |
542 | struct dma_resv *resv, |
543 | enum dma_resv_usage usage); |
544 | int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, |
545 | struct drm_gem_object *obj, |
546 | bool write); |
547 | |
548 | |
549 | void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, |
550 | struct drm_gpu_scheduler **sched_list, |
551 | unsigned int num_sched_list); |
552 | |
553 | void drm_sched_job_cleanup(struct drm_sched_job *job); |
554 | void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched); |
555 | void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); |
556 | void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); |
557 | void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); |
558 | void drm_sched_increase_karma(struct drm_sched_job *bad); |
559 | void drm_sched_reset_karma(struct drm_sched_job *bad); |
560 | void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type); |
bool drm_sched_dependency_optimized(struct dma_fence *fence,
562 | struct drm_sched_entity *entity); |
563 | void drm_sched_fault(struct drm_gpu_scheduler *sched); |
564 | |
565 | void drm_sched_rq_add_entity(struct drm_sched_rq *rq, |
566 | struct drm_sched_entity *entity); |
567 | void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, |
568 | struct drm_sched_entity *entity); |
569 | |
570 | void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts); |
571 | |
572 | int drm_sched_entity_init(struct drm_sched_entity *entity, |
573 | enum drm_sched_priority priority, |
574 | struct drm_gpu_scheduler **sched_list, |
575 | unsigned int num_sched_list, |
576 | atomic_t *guilty); |
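
/*
 * Entity setup sketch (hypothetical names; a single-scheduler list and no
 * guilty tracking):
 *
 *    struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *
 *    ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *                                sched_list, ARRAY_SIZE(sched_list), NULL);
 */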
577 | long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); |
578 | void drm_sched_entity_fini(struct drm_sched_entity *entity); |
579 | void drm_sched_entity_destroy(struct drm_sched_entity *entity); |
580 | void drm_sched_entity_select_rq(struct drm_sched_entity *entity); |
581 | struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); |
582 | void drm_sched_entity_push_job(struct drm_sched_job *sched_job); |
583 | void drm_sched_entity_set_priority(struct drm_sched_entity *entity, |
584 | enum drm_sched_priority priority); |
585 | bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); |
586 | int drm_sched_entity_error(struct drm_sched_entity *entity); |
587 | |
588 | struct drm_sched_fence *drm_sched_fence_alloc( |
589 | struct drm_sched_entity *s_entity, void *owner); |
590 | void drm_sched_fence_init(struct drm_sched_fence *fence, |
591 | struct drm_sched_entity *entity); |
592 | void drm_sched_fence_free(struct drm_sched_fence *fence); |
593 | |
594 | void drm_sched_fence_scheduled(struct drm_sched_fence *fence, |
595 | struct dma_fence *parent); |
596 | void drm_sched_fence_finished(struct drm_sched_fence *fence, int result); |
597 | |
598 | unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); |
599 | void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, |
600 | unsigned long remaining); |
601 | struct drm_gpu_scheduler * |
602 | drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, |
603 | unsigned int num_sched_list); |
604 | |
605 | #endif |
606 | |