/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

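/*
 * amdgpu_job_timedout - scheduler timeout callback
 *
 * Called by the GPU scheduler when a job exceeds its timeout. Tries a
 * lightweight soft recovery of the ring first and only falls back to a
 * full GPU reset if that fails and recovery is enabled.
 */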
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
}

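/**
 * amdgpu_job_alloc - allocate an amdgpu_job and its IB array
 * @adev: amdgpu device
 * @num_ibs: number of IBs to reserve space for, must be non-zero
 * @job: resulting job, valid on success
 * @vm: optional VM the job runs in
 *
 * The IBs are allocated in the same block of memory directly behind the
 * job structure, so a single kfree() releases both.
 */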
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

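/**
 * amdgpu_job_alloc_with_ib - allocate a job together with a single IB
 * @adev: amdgpu device
 * @size: size of the IB to allocate
 * @job: resulting job, valid on success
 *
 * Convenience wrapper around amdgpu_job_alloc() for jobs without a VM.
 */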
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

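/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job to clean up
 *
 * The IBs are freed against the scheduler fence if one exists, otherwise
 * against the hardware fence, so their memory is only reused once the job
 * has actually finished on the ring.
 */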
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

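/*
 * amdgpu_job_free_cb - scheduler free callback
 *
 * Called by the GPU scheduler once the job is no longer needed. Drops the
 * ring priority boost taken at submit time and releases all job resources.
 */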
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_ring_priority_put(ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

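/**
 * amdgpu_job_free - free a job that was not pushed to the scheduler
 * @job: job to free
 *
 * Counterpart to amdgpu_job_free_cb() for jobs that are submitted
 * directly or aborted before drm_sched_entity_push_job().
 */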
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

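/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @entity: scheduler entity to submit to
 * @owner: owner used for dependency tracking
 * @f: resulting finished fence, must not be NULL
 *
 * After this call the scheduler owns the job; the caller only keeps the
 * returned fence reference.
 */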
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	return 0;
}

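/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring to run the IBs on
 * @fence: resulting hardware fence
 *
 * Bypasses the scheduler, e.g. for IB tests, and frees the job as soon as
 * its IBs are scheduled on the ring.
 */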
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	if (r)
		return r;

	/* only take a reference once scheduling succeeded; on failure *fence
	 * is not guaranteed to point to a valid fence
	 */
	job->fence = dma_fence_get(*fence);

	amdgpu_job_free(job);
	return 0;
}

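/*
 * amdgpu_job_dependency - scheduler dependency callback
 *
 * Returns the next fence the job has to wait for before it can run.
 * Explicit dependencies the scheduler already optimized away are
 * remembered in sched_sync for pipeline synchronization, and a VMID is
 * grabbed once all other dependencies are satisfied.
 */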
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	bool explicit = false;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
	if (fence && explicit) {
		if (drm_sched_dependency_optimized(fence, s_entity)) {
			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
					      fence, false);
			if (r)
				DRM_ERROR("Error adding fence (%d)\n", r);
		}
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync, NULL);
	}

	return fence;
}

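/*
 * amdgpu_job_run - scheduler run callback
 *
 * Schedules the IBs of the job on its ring and returns the resulting
 * hardware fence. The IBs are skipped when VRAM was lost since the job
 * was created or when an error is already set on the finished fence.
 */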
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* skip the IBs as well if VRAM was lost since the job was created */
	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};