1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright (C) 2020-2023 Intel Corporation |
4 | */ |
5 | |
6 | #ifndef __IVPU_MMU_CONTEXT_H__ |
7 | #define __IVPU_MMU_CONTEXT_H__ |
8 | |
9 | #include <drm/drm_mm.h> |
10 | |
11 | struct ivpu_device; |
12 | struct ivpu_file_priv; |
13 | struct ivpu_addr_range; |
14 | |
/* Entries per page-table level: 512 x 64-bit descriptors = one 4 KiB table. */
#define IVPU_MMU_PGTABLE_ENTRIES 512ull

/*
 * Host-side bookkeeping for one VPU MMU page table.
 *
 * pgd_dma_ptr is the CPU virtual address of the top-level (PGD) table and
 * pgd_dma its DMA (bus) address as programmed into the MMU.
 *
 * pud_ptrs/pmd_ptrs/pte_ptrs are host-memory shadow arrays holding the CPU
 * virtual addresses of the lower-level tables, indexed by the slot in the
 * level above (NOTE(review): inferred from the pointer depth matching the
 * 4-level hierarchy — confirm against ivpu_mmu_context.c).
 */
struct ivpu_mmu_pgtable {
	u64 ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64 **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64 *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64 *pgd_dma_ptr;	/* CPU virtual address of the PGD page */
	dma_addr_t pgd_dma;	/* DMA address of the PGD page */
};
24 | |
/*
 * One VPU MMU address space: a drm_mm allocator for VPU virtual addresses
 * plus the page tables backing it.  Instances exist for the global,
 * reserved, and per-user contexts (see the *_context_init() declarations
 * below).
 */
struct ivpu_mmu_context {
	struct mutex lock; /* Protects: mm, pgtable */
	struct drm_mm mm;			/* VPU virtual-address range allocator */
	struct ivpu_mmu_pgtable pgtable;	/* Page tables for this address space */
	u32 id;	/* Context ID; presumably the SSID — cf. ivpu_mmu_user_context_mark_invalid() */
};
31 | |
/* Lifetime of the device-global and reserved contexts (driver init/teardown). */
int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);

/*
 * Per-user context setup/teardown; @ctx_id selects the context ID.
 * ivpu_mmu_user_context_mark_invalid() flags the context identified by
 * @ssid (NOTE(review): caller semantics — e.g. fault handling — are
 * defined in the .c file; confirm there).
 */
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);

/*
 * Reserve/release a @size-byte span of VPU virtual addresses within
 * @range from the context's drm_mm allocator; the allocation is
 * recorded in @node.
 */
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
				 u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);

/*
 * Map/unmap the pages of a scatter-gather table at @vpu_addr in this
 * context's page tables.  @llc_coherent selects LLC-coherent mappings
 * (NOTE(review): exact cacheability encoding lives in the .c file).
 */
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			     u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u64 vpu_addr, struct sg_table *sgt);
49 | |
50 | #endif /* __IVPU_MMU_CONTEXT_H__ */ |
51 | |