/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define GTT_TRACE(...) trace_printk(__VA_ARGS__)
#else
#define GTT_TRACE(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)	((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)	(I915_PTES(pte_len) - 1)
#define I915_PDES		512
#define I915_PDE_MASK		(I915_PDES - 1)

/* gen6-hsw use PTE bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)
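
/*
 * Illustrative worked example: for a 40-bit physical address such as
 * 0xAB12345000, GEN6_PTE_ADDR_ENCODE() folds address bits 39:32 (0xAB)
 * into PTE bits 11:4:
 *
 *	0xAB12345000 | ((0xAB12345000 >> 28) & 0xff0) == 0xAB12345AB0
 *
 * so the value actually stored in the 32-bit gen6 PTE is 0x12345AB0.
 */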

#define GEN6_PTES		I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN		(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT		22
#define GEN6_PDE_VALID		REG_BIT(0)
#define NUM_PTE(pde_shift)	(1 << (pde_shift - PAGE_SHIFT))
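
/*
 * Sanity check on the arithmetic: with GEN6_PDE_SHIFT == 22 and the
 * usual PAGE_SHIFT == 12, NUM_PTE(GEN6_PDE_SHIFT) == 1 << 10 == 1024,
 * matching GEN6_PTES (PAGE_SIZE / sizeof(gen6_pte_t) == 4096 / 4).
 */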

#define GEN7_PTE_CACHE_L3_LLC	(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define MTL_PPGTT_PTE_PAT3	BIT_ULL(62)
#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)
#define GEN12_PPGTT_PTE_PAT2	BIT_ULL(7)
#define GEN12_PPGTT_PTE_PAT1	BIT_ULL(4)
#define GEN12_PPGTT_PTE_PAT0	BIT_ULL(3)

#define GEN12_GGTT_PTE_LM		BIT_ULL(1)
#define MTL_GGTT_PTE_PAT0		BIT_ULL(52)
#define MTL_GGTT_PTE_PAT1		BIT_ULL(53)
#define GEN12_GGTT_PTE_ADDR_MASK	GENMASK_ULL(45, 12)
#define MTL_GGTT_PTE_PAT_MASK		GENMASK_ULL(53, 52)

#define GEN12_PDE_64K	BIT(6)
#define GEN12_PTE_PS64	BIT(8)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
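
/*
 * Worked example: HSW_WB_ELLC_LLC_AGE0 below is
 * HSW_CACHEABILITY_CONTROL(0xb); the low three bits (0x3) land in PTE
 * bits 3:1 and the fourth bit (0x8) is lifted to PTE bit 11:
 *
 *	((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) == 0x6 | 0x800 == 0x806
 */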
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference as compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
#define GEN8_3LVL_PDPES		4
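
/*
 * Worked example for the 48b layout above: the address
 * (1ull << 39) | (2ull << 30) | (3ull << 21) | (4ull << 12), i.e.
 * 0x8080604000, decodes to PML4E index 1, PDPE index 2, PDE index 3,
 * PTE index 4 and page offset 0; each index field is 9 bits wide, so
 * each level is recovered by shifting and masking with 0x1ff.
 */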

#define PPAT_UNCACHED		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE		0 /* WB LLC */
#define PPAT_CACHED		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC	_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP		REG_BIT(6)
#define GEN8_PPAT_AGE(x)	((x)<<4)
#define GEN8_PPAT_LLCeLLC	(3<<2)
#define GEN8_PPAT_LLCELLC	(2<<2)
#define GEN8_PPAT_LLC		(1<<2)
#define GEN8_PPAT_WB		(3<<0)
#define GEN8_PPAT_WT		(2<<0)
#define GEN8_PPAT_WC		(1<<0)
#define GEN8_PPAT_UC		(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
#define GEN8_PPAT(i, x)		((u64)(x) << ((i) * 8))
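
/*
 * Each PPAT entry occupies one byte of the 64-bit register image, hence
 * the (i) * 8 shift in GEN8_PPAT(). A hedged sketch of composing a
 * register value (illustrative pairings only, not the authoritative
 * table from the PAT setup code):
 *
 *	u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *		  GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 */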

#define GEN8_PAGE_PRESENT	BIT_ULL(0)
#define GEN8_PAGE_RW		BIT_ULL(1)

#define GEN8_PDE_IPS_64K	BIT(11)
#define GEN8_PDE_PS_2M		BIT(7)

#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
#define MTL_PPAT_L4_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
#define MTL_PPAT_L4_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
#define MTL_PPAT_L4_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
#define MTL_3_COH_2W		REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
#define MTL_2_COH_1W		REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
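
/*
 * A MTL PAT index packs an L4 cache policy together with a coherency
 * mode, e.g. MTL_PPAT_L4_0_WB | MTL_3_COH_2W for write-back plus 2-way
 * coherency (illustrative pairing; the authoritative table lives in the
 * PAT setup code).
 */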

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

#define for_each_sgt_daddr_next(__dp, __iter) \
	__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
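
/*
 * px_base() is a poor man's generic dispatch: __builtin_choose_expr()
 * picks, at compile time, the arm whose type matches the argument. So
 * px_base(obj) on a struct drm_i915_gem_object * is the object itself,
 * px_base(pt) on a struct i915_page_table * is pt->base, and
 * px_base(pd) on a struct i915_page_directory * is pd->pt.base;
 * anything else degrades to (void)0 and fails to compile wherever a
 * pointer is required.
 */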

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults back to the usual
	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;	/* size of the address space mapped (e.g. 2GB for ggtt) */
	u64 reserved;	/* size of the address space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vmas currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vmas not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  unsigned int pat_index,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*scratch_range)(struct i915_address_space *vm,
			      u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    unsigned int pat_index,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       unsigned int pat_index,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				unsigned int pat_index,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   unsigned int pat_index,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;

	/** List of GTs mapping this GGTT */
	struct list_head gt_list;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to release
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
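
/*
 * Example: with gen6's 22-bit PDE shift, NUM_PTE() is 1024, so for
 * address 0x403000 this returns (0x403000 >> 12) & 1023, i.e.
 * 0x403 & 0x3ff == 3: the fourth PTE within its page table.
 */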

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
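
/*
 * Example of the boundary clamp above: addr == 0x3ff000 with length ==
 * 0x2000 and pde_shift == 22 nominally covers two PTEs, but the range
 * crosses the 4M page-table boundary at 0x400000, so only the single
 * PTE up to that boundary is counted (1024 - 1023 == 1); the caller is
 * expected to advance and call again for the remainder.
 */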

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    unsigned int pat_index,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_gt *gt);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
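
/*
 * A minimal sketch of how the insertion paths consume this cursor,
 * assuming a page-at-a-time walk (write_pte() is a hypothetical
 * stand-in for the real PTE write; see the gen6/gen8 insert_entries
 * routines for the canonical loops):
 *
 *	struct sgt_dma it = sgt_dma(vma_res);
 *
 *	while (true) {
 *		write_pte(it.dma);
 *		it.dma += I915_GTT_PAGE_SIZE;
 *		if (it.dma >= it.max) {
 *			it.sg = sg_next(it.sg);
 *			if (!it.sg || !sg_dma_len(it.sg))
 *				break;
 *			it.dma = sg_dma_address(it.sg);
 *			it.max = it.dma + sg_dma_len(it.sg);
 *		}
 *	}
 */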

bool i915_ggtt_require_binder(struct drm_i915_private *i915);

#endif