// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

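/*
 * Destroy callback for a PAGING hwpt object: unlink it from the IOAS, remove
 * its domain from the IO page table, free the iommu_domain and drop the
 * reference held on the IOAS.
 */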
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	if (hwpt_paging->common.domain)
		iommu_domain_free(hwpt_paging->common.domain);

	refcount_dec(&hwpt_paging->ioas->obj.users);
}

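/*
 * Abort callback for a partially constructed PAGING hwpt: undo the IOAS
 * linkage made during allocation under the already-held ioas->mutex, then
 * finish tear-down through the normal destroy path.
 */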
void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

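/*
 * Destroy callback for a NESTED hwpt object: free the nested iommu_domain and
 * drop the reference held on the parent PAGING hwpt.
 */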
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	if (hwpt_nested->common.domain)
		iommu_domain_free(hwpt_nested->common.domain);

	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

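/*
 * Upgrade the paging domain to enforced cache coherency if it is not already
 * enforced and the driver supports it. Returns -EINVAL when the driver cannot
 * enforce coherency on this domain.
 */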
static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with the refcount_dec() in iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

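	/*
	 * Use the driver's user-controlled domain allocation when available;
	 * otherwise fall back to a plain kernel-managed domain.
	 */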
	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_domain_alloc(idev->dev->bus);
		if (!hwpt->domain) {
			rc = -ENOMEM;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency
	 * on a new domain.
	 *
	 * The cache coherency mode must be configured here and unchanged
	 * later. Note that a HWPT (non-CC) created for a device (non-CC) can
	 * be later reused by another device (either non-CC or CC). However, a
	 * HWPT (CC) created for a device (CC) cannot be reused by a non-CC
	 * device, only by other CC devices. In that case user space would
	 * instead need to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that
	 * cannot directly allocate a domain. These drivers do not finish
	 * creating the domain until attach is completed. Thus we must have
	 * this call sequence. Once those drivers are fixed this should be
	 * removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags || !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev, flags,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

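/*
 * Handler for the IOMMU_HWPT_ALLOC ioctl (struct iommu_hwpt_alloc). Allocates
 * either a PAGING hwpt on top of an IOAS or a NESTED hwpt on top of an
 * existing PAGING hwpt, depending on what object pt_id refers to, and returns
 * the new object's ID in out_hwpt_id.
 */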
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

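	/*
	 * pt_id may name either an IOAS, in which case a new PAGING hwpt is
	 * created, or an existing PAGING hwpt, in which case a NESTED hwpt is
	 * created on top of it.
	 */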
	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

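/*
 * Handler for the IOMMU_HWPT_SET_DIRTY_TRACKING ioctl (struct
 * iommu_hwpt_set_dirty_tracking): enable or disable dirty tracking on the
 * domain backing a PAGING hwpt.
 */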
int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

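/*
 * Handler for the IOMMU_HWPT_GET_DIRTY_BITMAP ioctl (struct
 * iommu_hwpt_get_dirty_bitmap): read back, and unless the NO_CLEAR flag is
 * set also clear, the dirty bits recorded by the domain backing a PAGING
 * hwpt.
 */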
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

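/*
 * Handler for the IOMMU_HWPT_INVALIDATE ioctl (struct iommu_hwpt_invalidate):
 * forward a user-provided array of driver specific cache invalidation
 * requests to the domain of a NESTED hwpt.
 */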
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_hw_pagetable *hwpt;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out;
	}

	rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
						      &data_array);
	done_num = data_array.entry_num;

	iommufd_put_object(ucmd->ictx, &hwpt->obj);
out:
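	/*
	 * Report back how many entries the driver consumed, even on failure,
	 * so userspace can tell where invalidation stopped.
	 */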
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}