// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_bo.h>

#include <linux/dmapool.h>
#include <linux/pci.h>

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether command submission on this context is currently
 * blocked, for example during error recovery.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct vmw_bo *cmd_space;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

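	/*
	 * Per the device protocol, the write to SVGA_REG_COMMAND_LOW hands
	 * the buffer to hardware, so the high half of the DMA address must
	 * be written first.
	 */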
	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_move_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Pass back count of non-empty command submitted lists.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

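		/*
		 * Processing stops at the first buffer the device hasn't
		 * finished yet; any later completions are picked up on a
		 * subsequent pass.
		 */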
		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy = 0;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

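		/*
		 * Skip past the failing command and restart the buffer at
		 * the next command, if any commands remain.
		 */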
		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */
		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_cmd_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when sleeping.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
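	/*
	 * Register as a command buffer waiter to keep the command buffer
	 * IRQ enabled for the duration of the wait.
	 */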
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
 * become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PFN_UP(size);
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

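	/* The range manager allocates whole pages; convert to bytes. */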
	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
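	/*
	 * The inline command space lives in the same DMA allocation as the
	 * device header, immediately after it.
	 */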
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

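	/*
	 * Flush the current buffer if the new commands don't fit, or if
	 * they require a different DX context.
	 */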
	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
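	/*
	 * Make the committed buffer the new current buffer; it is handed
	 * to hardware on the next flush.
	 */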
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}


/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
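	/*
	 * Submit directly on the device context, bypassing the software
	 * queues, and read back the status synchronously.
	 */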
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}


/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size and allocate the main command buffer space pool.
 * If successful, this enables large command submissions.
 * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		struct vmw_bo_params bo_params = {
			.domain = VMW_BO_DOMAIN_MOB,
			.busy_domain = VMW_BO_DOMAIN_MOB,
			.bo_type = ttm_bo_type_kernel,
			.size = size,
			.pin = true
		};
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
		if (ret)
			return ret;

		man->map = vmw_bo_map_and_cache(man->cmd_space);
		man->using_mob = man->map;
	}

	man->size = size;
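	/* The main pool is managed by the range manager at page granularity. */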
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	drm_info(&dev_priv->drm,
		 "Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;
}

/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       dev_priv->drm.dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					dev_priv->drm.dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob)
		vmw_bo_unreference(&man->cmd_space);
	else
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}