/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_SVM_H_
#define KFD_SVM_H_

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"

#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
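
/*
 * SVM_ADEV_PGMAP_OWNER() below yields the dev_pagemap owner used for
 * device-private pages: the XGMI hive when one exists, otherwise the device
 * itself, so peers in the same hive can access each other's VRAM in place.
 * A minimal sketch of passing the owner to HMM, assuming an already
 * initialized struct hmm_range named "range":
 *
 *	range.dev_private_owner = SVM_ADEV_PGMAP_OWNER(adev);
 *	ret = hmm_range_fault(&range);
 */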
#define SVM_ADEV_PGMAP_OWNER(adev) \
			((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
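
/**
 * struct svm_range_bo - VRAM BO backing one or more svm_ranges
 *
 * All ranges backed by this BO are linked on @range_list under @list_lock.
 * @eviction_fence, @eviction_work and @evicting coordinate eviction of the
 * BO against ongoing validation, and @release_work releases the BO
 * asynchronously once the last @kref reference is dropped.
 */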
struct svm_range_bo {
	struct amdgpu_bo		*bo;
	struct kref			kref;
	struct list_head		range_list; /* all svm ranges sharing this bo */
	spinlock_t			list_lock;
	struct amdgpu_amdkfd_fence	*eviction_fence;
	struct work_struct		eviction_work;
	uint32_t			evicting;
	struct work_struct		release_work;
	struct kfd_node			*node;
};
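
/*
 * Ops processed from the deferred work list: SVM_OP_UNMAP_RANGE removes a
 * range whose CPU mapping is gone, the UPDATE variants resize the mmu
 * interval notifier, the ADD variants insert a new or split range into
 * svms, and the *_AND_MAP variants additionally map the range on the GPUs.
 */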
enum svm_work_list_ops {
	SVM_OP_NULL,
	SVM_OP_UNMAP_RANGE,
	SVM_OP_UPDATE_RANGE_NOTIFIER,
	SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
	SVM_OP_ADD_RANGE,
	SVM_OP_ADD_RANGE_AND_MAP
};

struct svm_work_list_item {
	enum svm_work_list_ops op;
	struct mm_struct *mm;
};

/**
 * struct svm_range - shared virtual memory range
 *
 * @svms: list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: to serialize range migration, validation and mapping update
 * @start: range start address in pages
 * @last: range last address in pages
 * @it_node: node [start, last] stored in the interval tree; start and last
 *           are page numbers, so the range covers (last - start + 1) pages
 * @list: linked list node, used to scan all ranges of svms
 * @update_list: linked list node used to add to update_list
 * @npages: number of pages
 * @vram_pages: number of vram pages in this svm_range
 * @dma_addr: dma mapping addresses on each GPU for system memory physical
 *            pages
 * @ttm_res: vram ttm resource map
 * @offset: range start offset within mm_nodes
 * @svm_bo: struct to manage the split amdgpu_bo
 * @svm_bo_list: linked list node, to scan all ranges sharing the same svm_bo
 * @lock: protect prange start, last, child_list, svm_bo_list
 * @saved_flags: PF_MEMALLOC flags saved on lock and restored on unlock
 * @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: this svm_range location. 0: all pages are from sys ram;
 *              GPU id: this svm_range may include vram pages from GPU with
 *              id actual_loc.
 * @granularity: migration granularity, log2 of the number of pages
 * @invalid: non-zero means the CPU page table has been invalidated
 * @validate_timestamp: system timestamp when range is validated
 * @notifier: mmu interval notifier registered for this range
 * @work_item: deferred work item information
 * @deferred_list: list header used to add range to deferred list
 * @child_list: list header for split ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
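 * @mapped_to_gpu: true while the range is mapped on at least one GPU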
 *
 * Data structure for a virtual memory range shared by the CPU and GPUs. The
 * range can be backed by system memory (RAM) or device memory (VRAM), and
 * its pages can migrate from RAM to VRAM or from VRAM back to RAM.
 */
struct svm_range {
	struct svm_range_list		*svms;
	struct mutex			migrate_mutex;
	unsigned long			start;
	unsigned long			last;
	struct interval_tree_node	it_node;
	struct list_head		list;
	struct list_head		update_list;
	uint64_t			npages;
	uint64_t			vram_pages;
	dma_addr_t			*dma_addr[MAX_GPU_INSTANCE];
	struct ttm_resource		*ttm_res;
	uint64_t			offset;
	struct svm_range_bo		*svm_bo;
	struct list_head		svm_bo_list;
	struct mutex			lock;
	unsigned int			saved_flags;
	uint32_t			flags;
	uint32_t			preferred_loc;
	uint32_t			prefetch_loc;
	uint32_t			actual_loc;
	uint8_t				granularity;
	atomic_t			invalid;
	ktime_t				validate_timestamp;
	struct mmu_interval_notifier	notifier;
	struct svm_work_list_item	work_item;
	struct list_head		deferred_list;
	struct list_head		child_list;
	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
	bool				mapped_to_gpu;
};

static inline void svm_range_lock(struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
	memalloc_noreclaim_restore(prange->saved_flags);
	mutex_unlock(&prange->lock);
}
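
/*
 * svm_range_lock() also enters a PF_MEMALLOC scope via
 * memalloc_noreclaim_save(), so allocations made while the range lock is
 * held cannot recurse into direct reclaim. A minimal usage sketch, assuming
 * "prange" is a valid svm_range:
 *
 *	svm_range_lock(prange);
 *	npages = prange->last - prange->start + 1;
 *	svm_range_unlock(prange);
 */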

static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_get(&svm_bo->kref);

	return svm_bo;
}
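
/*
 * svm_range_bo_ref() is NULL-tolerant and returns its argument so the
 * reference can be taken inline. A hedged pairing sketch; the matching
 * release here uses svm_range_bo_unref_async() (declared below), which
 * drops the kref from a worker:
 *
 *	struct svm_range_bo *svm_bo = svm_range_bo_ref(prange->svm_bo);
 *	if (svm_bo) {
 *		... use svm_bo ...
 *		svm_range_bo_unref_async(svm_bo);
 *	}
 */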

int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	      uint64_t size, uint32_t nattrs,
	      struct kfd_ioctl_svm_attribute *attrs);
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
				      unsigned long addr,
				      struct svm_range **parent);
struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
					  uint32_t gpu_id);
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
			    bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
			    uint32_t vmid, uint32_t node_id, uint64_t addr,
			    bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
			     struct svm_range *prange, struct mm_struct *mm,
			     enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
			     unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_offset);
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);

/* The SVM API and HMM page migration work together; the device memory type
 * is set to a non-zero value when page migration registers device memory.
 */
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 || \
					(adev)->gmc.is_app_apu)
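
/*
 * A hedged example of gating an SVM path on the check above, assuming a
 * struct kfd_node pointer named "node":
 *
 *	if (!KFD_IS_SVM_API_SUPPORTED(node->adev))
 *		return -EINVAL;
 */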

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

void svm_range_set_max_pages(struct amdgpu_device *adev);
int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);

#else

struct kfd_process;

static inline int svm_range_list_init(struct kfd_process *p)
{
	return 0;
}
static inline void svm_range_list_fini(struct kfd_process *p)
{
	/* empty */
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
					  unsigned int pasid,
					  uint32_t vmid, uint32_t node_id,
					  uint64_t addr, bool write_fault)
{
	return -EFAULT;
}

static inline int svm_range_schedule_evict_svm_bo(
		struct amdgpu_amdkfd_fence *fence)
{
	WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
	return -EINVAL;
}

static inline int svm_range_get_info(struct kfd_process *p,
				     uint32_t *num_svm_ranges,
				     uint64_t *svm_priv_data_size)
{
	*num_svm_ranges = 0;
	*svm_priv_data_size = 0;
	return 0;
}

static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
					  uint8_t __user *user_priv_data,
					  uint64_t *priv_offset)
{
	return 0;
}

static inline int kfd_criu_restore_svm(struct kfd_process *p,
				       uint8_t __user *user_priv_ptr,
				       uint64_t *priv_data_offset,
				       uint64_t max_priv_data_size)
{
	return -EINVAL;
}

static inline int kfd_criu_resume_svm(struct kfd_process *p)
{
	return 0;
}

static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
{
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */