// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/interval_tree.h>
#include <linux/iommufd.h>
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"

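/*
 * Object destruction callback: tear down all mappings, then free the page
 * table and locking state. -ENOENT from iopt_unmap_all() is tolerated by the
 * WARN_ON() because it just means the IOAS was already empty.
 */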
void iommufd_ioas_destroy(struct iommufd_object *obj)
{
	struct iommufd_ioas *ioas = container_of(obj, struct iommufd_ioas, obj);
	int rc;

	rc = iopt_unmap_all(&ioas->iopt, NULL);
	WARN_ON(rc && rc != -ENOENT);
	iopt_destroy_table(&ioas->iopt);
	mutex_destroy(&ioas->mutex);
}

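/*
 * Allocate an IOAS object in its not-yet-visible state. The caller must
 * either publish it with iommufd_object_finalize() or discard it with
 * iommufd_object_abort_and_destroy().
 */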
struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx)
{
	struct iommufd_ioas *ioas;

	ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS);
	if (IS_ERR(ioas))
		return ioas;

	iopt_init_table(&ioas->iopt);
	INIT_LIST_HEAD(&ioas->hwpt_list);
	mutex_init(&ioas->mutex);
	return ioas;
}

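/*
 * IOMMU_IOAS_ALLOC: the reply carrying out_ioas_id is delivered to userspace
 * before the object is finalized, so if copying the reply fails the ID is
 * destroyed without ever having become visible.
 */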
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_alloc *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	ioas = iommufd_ioas_alloc(ucmd->ictx);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	cmd->out_ioas_id = ioas->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_table;
	iommufd_object_finalize(ucmd->ictx, &ioas->obj);
	return 0;

out_table:
	iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);
	return rc;
}

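/*
 * IOMMU_IOAS_IOVA_RANGES: report the holes between reserved regions, i.e.
 * the IOVA ranges userspace is allowed to map. cmd->num_iovas is always
 * updated to the total number of ranges; if the supplied array was too small
 * the output is truncated and -EMSGSIZE is returned so the caller can retry
 * with a bigger buffer.
 *
 * Illustrative userspace sketch (assumed usage, not part of this file); the
 * first call fails with -EMSGSIZE but sets num_iovas to the required count,
 * the second call fills the array:
 *
 *	struct iommu_iova_range *ranges;
 *	struct iommu_ioas_iova_ranges cmd = {
 *		.size = sizeof(cmd),
 *		.ioas_id = ioas_id,
 *	};
 *
 *	ioctl(fd, IOMMU_IOAS_IOVA_RANGES, &cmd);
 *	ranges = calloc(cmd.num_iovas, sizeof(*ranges));
 *	cmd.allowed_iovas = (uintptr_t)ranges;
 *	ioctl(fd, IOMMU_IOAS_IOVA_RANGES, &cmd);
 */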
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
{
	struct iommu_iova_range __user *ranges;
	struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	struct interval_tree_span_iter span;
	u32 max_iovas;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	down_read(&ioas->iopt.iova_rwsem);
	max_iovas = cmd->num_iovas;
	ranges = u64_to_user_ptr(cmd->allowed_iovas);
	cmd->num_iovas = 0;
	cmd->out_iova_alignment = ioas->iopt.iova_alignment;
	interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0,
				    ULONG_MAX) {
		if (!span.is_hole)
			continue;
		if (cmd->num_iovas < max_iovas) {
			struct iommu_iova_range elm = {
				.start = span.start_hole,
				.last = span.last_hole,
			};

			if (copy_to_user(&ranges[cmd->num_iovas], &elm,
					 sizeof(elm))) {
				rc = -EFAULT;
				goto out_put;
			}
		}
		cmd->num_iovas++;
	}
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put;
	if (cmd->num_iovas > max_iovas)
		rc = -EMSGSIZE;
out_put:
	up_read(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

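/*
 * Build an interval tree from a user supplied array of ranges. Malformed
 * (start >= last) and overlapping ranges are rejected, so the resulting tree
 * is a normalized description of the allowed IOVA space.
 */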
static int iommufd_ioas_load_iovas(struct rb_root_cached *itree,
				   struct iommu_iova_range __user *ranges,
				   u32 num)
{
	u32 i;

	for (i = 0; i != num; i++) {
		struct iommu_iova_range range;
		struct iopt_allowed *allowed;

		if (copy_from_user(&range, ranges + i, sizeof(range)))
			return -EFAULT;

		if (range.start >= range.last)
			return -EINVAL;

		if (interval_tree_iter_first(itree, range.start, range.last))
			return -EINVAL;

		allowed = kzalloc(sizeof(*allowed), GFP_KERNEL_ACCOUNT);
		if (!allowed)
			return -ENOMEM;
		allowed->node.start = range.start;
		allowed->node.last = range.last;

		interval_tree_insert(&allowed->node, itree);
	}
	return 0;
}

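/*
 * IOMMU_IOAS_ALLOW_IOVAS: replace the allowed IOVA list wholesale. The new
 * tree is staged locally and handed to iopt_set_allow_iova() as one atomic
 * swap; whichever tree loses (old on success, new on failure) is left in
 * allowed_iova and freed by the loop at out_free.
 */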
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_allow_iovas *cmd = ucmd->cmd;
	struct rb_root_cached allowed_iova = RB_ROOT_CACHED;
	struct interval_tree_node *node;
	struct iommufd_ioas *ioas;
	struct io_pagetable *iopt;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	iopt = &ioas->iopt;

	rc = iommufd_ioas_load_iovas(&allowed_iova,
				     u64_to_user_ptr(cmd->allowed_iovas),
				     cmd->num_iovas);
	if (rc)
		goto out_free;

	/*
	 * We want the allowed tree update to be atomic, so we have to keep the
	 * original nodes around, and keep track of the new nodes as we allocate
	 * memory for them. The simplest solution is to have a new/old tree and
	 * then swap new for old. On success we free the old tree, on failure we
	 * free the new tree.
	 */
	rc = iopt_set_allow_iova(iopt, &allowed_iova);
out_free:
	while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) {
		interval_tree_remove(node, &allowed_iova);
		kfree(container_of(node, struct iopt_allowed, node));
	}
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

static int conv_iommu_prot(u32 map_flags)
{
	/*
	 * We provide no manual cache coherency ioctls to userspace and most
	 * architectures make the CPU ops for cache flushing privileged.
	 * Therefore we require the underlying IOMMU to support CPU coherent
	 * operation. Support for IOMMU_CACHE is enforced by the
	 * IOMMU_CAP_CACHE_COHERENCY test during bind.
	 */
	int iommu_prot = IOMMU_CACHE;

	if (map_flags & IOMMU_IOAS_MAP_WRITEABLE)
		iommu_prot |= IOMMU_WRITE;
	if (map_flags & IOMMU_IOAS_MAP_READABLE)
		iommu_prot |= IOMMU_READ;
	return iommu_prot;
}

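/*
 * IOMMU_IOAS_MAP: pin user memory and map it into the IOAS. Without
 * IOMMU_IOAS_MAP_FIXED_IOVA the kernel picks the IOVA (IOPT_ALLOC_IOVA) and
 * returns it in cmd->iova; with the flag, cmd->iova is used as-is. The
 * ULONG_MAX checks guard 32-bit kernels, where unsigned long cannot
 * represent the full u64 uAPI values.
 */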
int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_map *cmd = ucmd->cmd;
	unsigned long iova = cmd->iova;
	struct iommufd_ioas *ioas;
	unsigned int flags = 0;
	int rc;

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
		return -EOVERFLOW;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	rc = iopt_map_user_pages(ucmd->ictx, &ioas->iopt, &iova,
				 u64_to_user_ptr(cmd->user_va), cmd->length,
				 conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put;

	cmd->iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

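/*
 * IOMMU_IOAS_COPY: map memory already mapped in a source IOAS into a
 * destination IOAS. The underlying struct iopt_pages are collected by
 * iopt_get_pages() and shared with the destination, so the memory is not
 * pinned a second time. iommufd_test_syz_conv_iova_id() is a no-op outside
 * of the selftest/fuzzing configuration.
 */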
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_copy *cmd = ucmd->cmd;
	struct iommufd_ioas *src_ioas;
	struct iommufd_ioas *dst_ioas;
	unsigned int flags = 0;
	LIST_HEAD(pages_list);
	unsigned long iova;
	int rc;

	iommufd_test_syz_conv_iova_id(ucmd, cmd->src_ioas_id, &cmd->src_iova,
				      &cmd->flags);

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)))
		return -EOPNOTSUPP;
	if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX ||
	    cmd->dst_iova >= ULONG_MAX)
		return -EOVERFLOW;

	src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
	if (IS_ERR(src_ioas))
		return PTR_ERR(src_ioas);
	rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
			    &pages_list);
	iommufd_put_object(ucmd->ictx, &src_ioas->obj);
	if (rc)
		return rc;

	dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id);
	if (IS_ERR(dst_ioas)) {
		rc = PTR_ERR(dst_ioas);
		goto out_pages;
	}

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	iova = cmd->dst_iova;
	rc = iopt_map_pages(&dst_ioas->iopt, &pages_list, cmd->length, &iova,
			    conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put_dst;

	cmd->dst_iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put_dst:
	iommufd_put_object(ucmd->ictx, &dst_ioas->obj);
out_pages:
	iopt_free_pages_list(&pages_list);
	return rc;
}

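/*
 * IOMMU_IOAS_UNMAP: iova == 0 with length == U64_MAX is the "unmap
 * everything" wildcard; any other iova/length pair is unmapped via
 * iopt_unmap_iova(). The number of bytes actually unmapped is handed back
 * in cmd->length.
 */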
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_unmap *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	unsigned long unmapped = 0;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (cmd->iova == 0 && cmd->length == U64_MAX) {
		rc = iopt_unmap_all(&ioas->iopt, &unmapped);
		if (rc)
			goto out_put;
	} else {
		if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) {
			rc = -EOVERFLOW;
			goto out_put;
		}
		rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length,
				     &unmapped);
		if (rc)
			goto out_put;
	}

	cmd->length = unmapped;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

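/*
 * IOMMU_OPTION_RLIMIT_MODE: choose whether pinned pages are accounted
 * against the user's locked-memory rlimit (IOPT_PAGES_ACCOUNT_USER,
 * val64 == 0) or against the mm (IOPT_PAGES_ACCOUNT_MM, val64 == 1). The
 * mode is global to the iommufd context, so it can only be changed while no
 * objects exist (-EBUSY otherwise) and requires CAP_SYS_RESOURCE.
 */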
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx)
{
	if (cmd->object_id)
		return -EOPNOTSUPP;

	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		int rc = 0;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		xa_lock(&ictx->objects);
		if (!xa_empty(&ictx->objects)) {
			rc = -EBUSY;
		} else {
			if (cmd->val64 == 0)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_USER;
			else if (cmd->val64 == 1)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
			else
				rc = -EINVAL;
		}
		xa_unlock(&ictx->objects);

		return rc;
	}
	return -EOPNOTSUPP;
}

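/*
 * IOMMU_OPTION_HUGE_PAGES: val64 == 0 forces PAGE_SIZE granularity for this
 * IOAS, val64 == 1 (the default) lets the kernel use large IOMMU page sizes
 * when the physical layout allows it.
 */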
static int iommufd_ioas_option_huge_pages(struct iommu_option *cmd,
					  struct iommufd_ioas *ioas)
{
	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = !ioas->iopt.disable_large_pages;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		if (cmd->val64 == 0)
			return iopt_disable_large_pages(&ioas->iopt);
		if (cmd->val64 == 1) {
			iopt_enable_large_pages(&ioas->iopt);
			return 0;
		}
		return -EINVAL;
	}
	return -EOPNOTSUPP;
}

int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
{
	struct iommu_option *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	switch (cmd->option_id) {
	case IOMMU_OPTION_HUGE_PAGES:
		rc = iommufd_ioas_option_huge_pages(cmd, ioas);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}