/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>
#include <linux/hashtable.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>


#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

#define VMWGFX_MIN_INITIAL_WIDTH 1280
#define VMWGFX_MIN_INITIAL_HEIGHT 800

#define VMWGFX_PCI_ID_SVGA2 0x0405
#define VMWGFX_PCI_ID_SVGA3 0x0406

/*
 * This has to match get_count_order(SVGA_IRQFLAG_MAX)
 */
#define VMWGFX_MAX_NUM_IRQS 6

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12

#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
#define VMW_CURSOR_SNOOP_WIDTH 64
#define VMW_CURSOR_SNOOP_HEIGHT 64

#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)

struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};

struct vmwgfx_hash_item {
	struct hlist_node head;
	unsigned long key;
};
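
/*
 * Example (illustrative sketch, not taken from the driver): a struct
 * vmwgfx_hash_item is embedded in a larger object and tracked in a
 * hashtable such as vmw_sw_context::res_ht below, using the standard
 * <linux/hashtable.h> helpers keyed on @key:
 *
 *	hash->key = (unsigned long) user_handle;
 *	hash_add(sw_context->res_ht, &hash->head, hash->key);
 *
 *	hash_for_each_possible(sw_context->res_ht, hash, head, key)
 *		if (hash->key == key)
 *			break;
 */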


/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether the buffer is to be validated as a MOB.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;


/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @used_prio: The priority this resource was last used with.
 * @guest_memory_size: Guest memory buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the guest memory buffer.
 * Protected by resource reserved.
 * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
 * resource. Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @guest_memory_bo: The guest memory buffer if any. Protected by resource
 * reserved.
 * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
 * by resource reserved. Note that only a few resource types can have a
 * @guest_memory_offset different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * guest memory buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB guest memory rbtree. Protected by
 * @guest_memory_bo reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @dirty: Dirty-tracking state for this resource, if any.
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_bo;
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long guest_memory_size;
	u32 res_dirty : 1;
	u32 guest_memory_dirty : 1;
	u32 coherent : 1;
	struct vmw_bo *guest_memory_bo;
	unsigned long guest_memory_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB only the first index is used.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For a cubemap
 * texture this is the number of faces * array_size. This should be 0 for a
 * pre-SM4 device.
 * @buffer_byte_stride: Buffer byte stride.
 * @num_sizes: Size of @sizes. For a GB surface this should always be 1.
 * @base_size: Surface dimension.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest-backed surfaces.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 quality_level;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};

/**
 * struct vmw_surface: Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @private: Private per-entry data used by the execbuf validation code.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used for this scatter/gather table.
 * @pages: Array of page pointers, when pages are used as the backing store.
 * @addrs: Array of DMA addresses, when coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information.
 * @num_pages: Number of pages total.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
};


struct vmw_ttm_tt {
	struct ttm_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	bool mapped;
	bool bound;
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target,
	vmw_du_max
};

struct vmw_validation_context;
struct vmw_ctx_validation_info;

/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Hash table used to find validation duplicates
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file private of the client.
 * Otherwise NULL
 * @filp: If @kernel is false, points to the drm file of the client.
 * Otherwise NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
	bool kernel;
	struct vmw_fpriv *fp;
	struct drm_file *filp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_bo *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_bo *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the table has been set up on the device.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct vmw_bo *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

/**
 * enum vmw_sm_type - Graphics context capability supported by device.
 * @VMW_SM_LEGACY: Pre-DX context.
 * @VMW_SM_4: Context supports up to SM4.
 * @VMW_SM_4_1: Context supports up to SM4_1.
 * @VMW_SM_5: Context supports up to SM5.
 * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_5_1X,
	VMW_SM_MAX
};

struct vmw_private {
	struct drm_device drm;
	struct ttm_device bdev;

	struct drm_vma_offset_manager vma_manager;
	u32 pci_id;
	resource_size_t io_start;
	resource_size_t vram_start;
	resource_size_t vram_size;
	resource_size_t max_primary_mem;
	u32 __iomem *rmmio;
	u32 *fifo_mem;
	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	bool assume_16bpp;
	u32 irqs[VMWGFX_MAX_NUM_IRQS];
	u32 num_irq_vectors;

	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/*
	 * Execbuf. Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/*
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	atomic_t num_fifo_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_bo *dummy_query_bo;
	struct vmw_bo *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_fifo_state *fifo;
	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	uint32 *devcaps;

	/*
	 * mksGuestStat instance-descriptor and pid arrays
	 */
	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
#endif
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
{
	return container_of(bdev, struct vmw_private, bdev);
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/*
 * SVGA v3 has mmio register access and lacks fifo cmds
 */
static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
{
	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	if (vmw_is_svga_v3(dev_priv)) {
		iowrite32(value, dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	if (vmw_is_svga_v3(dev_priv)) {
		val = ioread32(dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}

	return val;
}
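
/*
 * Example (illustrative only; the register names come from svga_reg.h):
 * reading and writing a device register looks the same on SVGA2
 * (index/value I/O ports) and SVGA3 (MMIO), since vmw_read()/vmw_write()
 * hide the difference:
 *
 *	u32 id = vmw_read(dev_priv, SVGA_REG_ID);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
 */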

/**
 * has_sm4_context - Does the device support SM4 context.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM4 contexts, false otherwise.
 */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4);
}

/**
 * has_sm4_1_context - Does the device support SM4_1 context.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM4_1 contexts, false otherwise.
 */
static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4_1);
}

/**
 * has_sm5_context - Does the device support SM5 context.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM5 contexts, false otherwise.
 */
static inline bool has_sm5_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5);
}

/**
 * has_gl43_context - Does the device support GL43 context.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports GL43 contexts, false otherwise.
 */
static inline bool has_gl43_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5_1X);
}


static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
{
	return (has_gl43_context(dev_priv) ?
		SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
}

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
bool vmwgfx_supported(struct vmw_private *vmw);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct drm_file *filp,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);

extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_guest_memory,
				   struct vmw_bo *new_guest_memory,
				   unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_resource *old_mem,
				  struct ttm_resource *new_mem);
int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
void vmw_resource_evict_all(struct vmw_private *dev_priv);
void vmw_resource_unbind_list(struct vmw_bo *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !RB_EMPTY_NODE(&res->mob_node);
}

/**
 * GEM related functionality - vmwgfx_gem.c
 */
struct vmw_bo_params;
int vmw_gem_object_create(struct vmw_private *vmw,
			  struct vmw_bo_params *params,
			  struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
					     struct drm_file *filp,
					     uint32_t size,
					     uint32_t *handle,
					     struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *filp);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
extern bool vmw_cmd_supported(struct vmw_private *vmw);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);

#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)                 \
({                                                                     \
	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({          \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",  \
			  __func__, (unsigned int) __bytes);           \
		NULL;                                                  \
	});                                                            \
})

#define VMW_CMD_RESERVE(__priv, __bytes)                               \
	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
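
/*
 * Typical fifo command submission pattern (illustrative sketch built on
 * the declarations above; struct my_cmd is a hypothetical command layout):
 *
 *	struct my_cmd *cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *
 *	if (unlikely(!cmd))
 *		return -ENOMEM;
 *	... fill in *cmd ...
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */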


/**
 * vmw_fifo_caps - Returns the capabilities of the FIFO command
 * queue or 0 if fifo memory isn't present.
 * @dev_priv: The device private context
 */
static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
{
	if (!dev_priv->fifo_mem || !dev_priv->fifo)
		return 0;
	return dev_priv->fifo->capabilities;
}


/**
 * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
 * is enabled in the FIFO.
 * @dev_priv: The device private context
 */
static inline bool
vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
{
	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}

/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size,
			       u32 domain,
			       struct vmw_bo **bo_p);

extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
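
/*
 * Putting the iterator helpers together (illustrative sketch): following
 * the __sg_page_iter_start() convention, the iterator does not point at a
 * valid page until it has been advanced once, so a typical walk over a
 * struct vmw_sg_table looks like:
 *
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *
 *		... program addr into a GMR / MOB page table ...
 *	}
 */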

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
				       struct vmw_fpriv *vmw_fp,
				       int ret,
				       struct drm_vmw_fence_rep __user
				       *user_fence_rep,
				       struct vmw_fence_obj *fence,
				       uint32_t fence_handle,
				       int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_irq_install(struct vmw_private *dev_priv);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);

/**
 * System memory manager
 */
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *table);

/*
 * MemoryOBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_bo *mob);
extern struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Streamoutput management
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	 \
	.line = 0,				 \
	.line_offset = 0,			 \
	.rect = { .x1 = INT_MAX/2,		 \
		  .y1 = INT_MAX/2,		 \
		  .x2 = INT_MIN/2,		 \
		  .y2 = INT_MIN/2		 \
	},					 \
	.cpp = _cpp,				 \
	.do_cpy = vmw_diff_memcpy,		 \
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
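
/*
 * Example use of the CPU blit interface (illustrative sketch; the buffer
 * objects, offsets and strides are placeholders): a bounding-box tracking
 * blit is set up with the diff initializer, and the resulting dirty
 * rectangle can be read back from @rect afterwards:
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
 *			      src_bo, src_offset, src_stride,
 *			      width, height, &diff);
 *	if (!ret && drm_rect_visible(&diff.rect))
 *		... flush diff.rect to the device ...
 */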

/* Host messaging - vmwgfx_msg.c: */
void vmw_disable_backdoor(void);
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

/* Host mksGuestStats - vmwgfx_msg.c: */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);

int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vmw_mksstat_remove_all(struct vmw_private *dev_priv);

/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...) \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
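
/*
 * Example (illustrative): rejecting a malformed command in the execbuf path:
 *
 *	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n", cmd_id);
 */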

/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);


/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...) \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
 *
 * @vmw: The device private context
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

/**
 * vmw_fifo_mem_write - Perform a MMIO write to the fifo memory
 *
 * @vmw: The device private context
 * @fifo_reg: The fifo register to write to
 * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}

static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
{
	u32 fence;

	if (vmw_is_svga_v3(dev_priv))
		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
	else
		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);

	return fence;
}

static inline void vmw_fence_write(struct vmw_private *dev_priv,
				   u32 fence)
{
	BUG_ON(vmw_is_svga_v3(dev_priv));
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
}

static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
{
	u32 status;

	if (vmw_is_svga_v3(vmw))
		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
	else
		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);

	return status;
}

static inline void vmw_irq_status_write(struct vmw_private *vmw,
					uint32 status)
{
	if (vmw_is_svga_v3(vmw))
		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
	else
		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
}

static inline bool vmw_has_fences(struct vmw_private *vmw)
{
	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
		return true;
	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}

static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
					   u32 shader_type)
{
	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;

	if (shader_model >= VMW_SM_5)
		max_allowed = SVGA3D_SHADERTYPE_MAX;
	else if (shader_model >= VMW_SM_4)
		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
}

#endif