/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <drm/drmP.h>
#include "radeon.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
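/*
 * Typical IB lifecycle, as a rough sketch of how the helpers in this
 * file fit together (illustrative only; error handling and the actual
 * packet contents are elided, and the flow is modeled on typical
 * callers such as the IB tests and the CS ioctl, not quoted verbatim):
 *
 *	struct radeon_ib ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64 * 1024);
 *	if (r)
 *		return r;
 *	... write packets into ib.ptr[] and set ib.length_dw ...
 *	r = radeon_ib_schedule(rdev, &ib, NULL, true);
 *	if (!r)
 *		r = radeon_fence_wait(ib.fence, false);
 *	radeon_ib_free(rdev, &ib);
 */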
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requested vm to associate the IB with, or NULL
 * @size: requested IB size
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	radeon_sync_create(&ib->sync);

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;

	return 0;
}

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_sync_free(rdev, &ib->sync, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
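 *
 * A minimal sketch of the dual-IB submission described above, as a
 * typical caller might issue it (illustrative only; ib and const_ib
 * stand in for IBs the caller has already filled in):
 *
 *	if (rdev->family >= CHIP_TAHITI)
 *		r = radeon_ib_schedule(rdev, &ib, &const_ib, true);
 *	else
 *		r = radeon_ib_schedule(rdev, &ib, NULL, true);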
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib, bool hdp_flush)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing is in the IB; we should report this */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	/* grab a vm id if necessary */
	if (ib->vm) {
		struct radeon_fence *vm_id_fence;

		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
		radeon_sync_fence(&ib->sync, vm_id_fence);
	}

	/* sync with other rings */
	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	if (ib->vm)
		radeon_vm_flush(rdev, ib->vm, ib->ring,
				ib->sync.last_vm_update);

	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_sync_free(rdev, &const_ib->sync, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}

	if (ib->vm)
		radeon_vm_fence(rdev, ib->vm, ib->fence);

	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
	return 0;
}

/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
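 *
 * Typically called once from the asic startup path and paired with
 * radeon_ib_pool_fini() at teardown, along the lines of (a sketch of
 * the common caller pattern, not a verbatim quote of any one asic):
 *
 *	r = radeon_ib_pool_init(rdev);
 *	if (r) {
 *		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 *		return r;
 *	}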
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}

	if (rdev->family >= CHIP_BONAIRE) {
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024,
					      RADEON_GPU_PAGE_SIZE,
					      RADEON_GEM_DOMAIN_GTT,
					      RADEON_GEM_GTT_WC);
	} else {
		/* Before CIK, it's better to stick to cacheable GTT due
		 * to the command stream checking
		 */
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024,
					      RADEON_GPU_PAGE_SIZE,
					      RADEON_GEM_DOMAIN_GTT, 0);
	}
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
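 *
 * Typically invoked from the asic init/resume paths once the rings
 * are up, e.g. (a sketch of the common caller pattern):
 *
 *	r = radeon_ib_ring_tests(rdev);
 *	if (r)
 *		DRM_ERROR("ib ring test failed (%d).\n", r);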
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			radeon_fence_driver_force_completion(rdev, i);
			ring->ready = false;
			rdev->needs_reset = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}