1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. |
4 | * Author: Joerg Roedel <jroedel@suse.de> |
5 | */ |
6 | |
7 | #define pr_fmt(fmt) "iommu: " fmt |
8 | |
9 | #include <linux/amba/bus.h> |
10 | #include <linux/device.h> |
11 | #include <linux/kernel.h> |
12 | #include <linux/bits.h> |
13 | #include <linux/bug.h> |
14 | #include <linux/types.h> |
15 | #include <linux/init.h> |
16 | #include <linux/export.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/errno.h> |
19 | #include <linux/host1x_context_bus.h> |
20 | #include <linux/iommu.h> |
21 | #include <linux/idr.h> |
22 | #include <linux/err.h> |
23 | #include <linux/pci.h> |
24 | #include <linux/pci-ats.h> |
25 | #include <linux/bitops.h> |
26 | #include <linux/platform_device.h> |
27 | #include <linux/property.h> |
28 | #include <linux/fsl/mc.h> |
29 | #include <linux/module.h> |
30 | #include <linux/cc_platform.h> |
31 | #include <linux/cdx/cdx_bus.h> |
32 | #include <trace/events/iommu.h> |
33 | #include <linux/sched/mm.h> |
34 | #include <linux/msi.h> |
35 | |
#include "dma-iommu.h"
#include "iommu-priv.h"

#include "iommu-sva.h"
41 | |
42 | static struct kset *iommu_group_kset; |
43 | static DEFINE_IDA(iommu_group_ida); |
44 | static DEFINE_IDA(iommu_global_pasid_ida); |
45 | |
46 | static unsigned int iommu_def_domain_type __read_mostly; |
47 | static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); |
48 | static u32 iommu_cmd_line __read_mostly; |
49 | |
50 | struct iommu_group { |
51 | struct kobject kobj; |
52 | struct kobject *devices_kobj; |
53 | struct list_head devices; |
54 | struct xarray pasid_array; |
55 | struct mutex mutex; |
56 | void *iommu_data; |
57 | void (*iommu_data_release)(void *iommu_data); |
58 | char *name; |
59 | int id; |
60 | struct iommu_domain *default_domain; |
61 | struct iommu_domain *blocking_domain; |
62 | struct iommu_domain *domain; |
63 | struct list_head entry; |
64 | unsigned int owner_cnt; |
65 | void *owner; |
66 | }; |
67 | |
68 | struct group_device { |
69 | struct list_head list; |
70 | struct device *dev; |
71 | char *name; |
72 | }; |
73 | |
74 | /* Iterate over each struct group_device in a struct iommu_group */ |
75 | #define for_each_group_device(group, pos) \ |
76 | list_for_each_entry(pos, &(group)->devices, list) |
77 | |
78 | struct iommu_group_attribute { |
79 | struct attribute attr; |
80 | ssize_t (*show)(struct iommu_group *group, char *buf); |
81 | ssize_t (*store)(struct iommu_group *group, |
82 | const char *buf, size_t count); |
83 | }; |
84 | |
static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};
92 | |
93 | #define IOMMU_CMD_LINE_DMA_API BIT(0) |
94 | #define IOMMU_CMD_LINE_STRICT BIT(1) |
95 | |
96 | static int iommu_bus_notifier(struct notifier_block *nb, |
97 | unsigned long action, void *data); |
98 | static void iommu_release_device(struct device *dev); |
99 | static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus, |
100 | unsigned type); |
101 | static int __iommu_attach_device(struct iommu_domain *domain, |
102 | struct device *dev); |
103 | static int __iommu_attach_group(struct iommu_domain *domain, |
104 | struct iommu_group *group); |
105 | |
106 | enum { |
107 | IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0, |
108 | }; |
109 | |
110 | static int __iommu_device_set_domain(struct iommu_group *group, |
111 | struct device *dev, |
112 | struct iommu_domain *new_domain, |
113 | unsigned int flags); |
114 | static int __iommu_group_set_domain_internal(struct iommu_group *group, |
115 | struct iommu_domain *new_domain, |
116 | unsigned int flags); |
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}
128 | |
129 | static int iommu_setup_default_domain(struct iommu_group *group, |
130 | int target_type); |
131 | static int iommu_create_device_direct_mappings(struct iommu_domain *domain, |
132 | struct device *dev); |
133 | static ssize_t iommu_group_store_type(struct iommu_group *group, |
134 | const char *buf, size_t count); |
135 | static struct group_device *iommu_group_alloc_device(struct iommu_group *group, |
136 | struct device *dev); |
137 | static void __iommu_group_free_device(struct iommu_group *group, |
138 | struct group_device *grp_dev); |
139 | |
140 | #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ |
141 | struct iommu_group_attribute iommu_group_attr_##_name = \ |
142 | __ATTR(_name, _mode, _show, _store) |
143 | |
144 | #define to_iommu_group_attr(_attr) \ |
145 | container_of(_attr, struct iommu_group_attribute, attr) |
146 | #define to_iommu_group(_kobj) \ |
147 | container_of(_kobj, struct iommu_group, kobj) |
148 | |
149 | static LIST_HEAD(iommu_device_list); |
150 | static DEFINE_SPINLOCK(iommu_device_lock); |
151 | |
152 | static struct bus_type * const iommu_buses[] = { |
153 | &platform_bus_type, |
154 | #ifdef CONFIG_PCI |
155 | &pci_bus_type, |
156 | #endif |
157 | #ifdef CONFIG_ARM_AMBA |
158 | &amba_bustype, |
159 | #endif |
160 | #ifdef CONFIG_FSL_MC_BUS |
161 | &fsl_mc_bus_type, |
162 | #endif |
163 | #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS |
164 | &host1x_context_device_bus_type, |
165 | #endif |
166 | #ifdef CONFIG_CDX_BUS |
167 | &cdx_bus_type, |
168 | #endif |
169 | }; |
170 | |
171 | /* |
172 | * Use a function instead of an array here because the domain-type is a |
173 | * bit-field, so an array would waste memory. |
174 | */ |
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}
191 | |
static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				" (set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
233 | subsys_initcall(iommu_subsys_init); |
234 | |
235 | static int remove_iommu_group(struct device *dev, void *data) |
236 | { |
237 | if (dev->iommu && dev->iommu->iommu_dev == data) |
238 | iommu_release_device(dev); |
239 | |
240 | return 0; |
241 | } |
242 | |
243 | /** |
244 | * iommu_device_register() - Register an IOMMU hardware instance |
245 | * @iommu: IOMMU handle for the instance |
246 | * @ops: IOMMU ops to associate with the instance |
247 | * @hwdev: (optional) actual instance device, used for fwnode lookup |
248 | * |
249 | * Return: 0 on success, or an error. |
250 | */ |
251 | int iommu_device_register(struct iommu_device *iommu, |
252 | const struct iommu_ops *ops, struct device *hwdev) |
253 | { |
254 | int err = 0; |
255 | |
256 | /* We need to be able to take module references appropriately */ |
257 | if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) |
258 | return -EINVAL; |
259 | /* |
260 | * Temporarily enforce global restriction to a single driver. This was |
261 | * already the de-facto behaviour, since any possible combination of |
262 | * existing drivers would compete for at least the PCI or platform bus. |
263 | */ |
264 | if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) |
265 | return -EBUSY; |
266 | |
267 | iommu->ops = ops; |
268 | if (hwdev) |
269 | iommu->fwnode = dev_fwnode(hwdev); |
270 | |
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
274 | |
	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
279 | if (err) |
280 | iommu_device_unregister(iommu); |
281 | return err; |
282 | } |
283 | EXPORT_SYMBOL_GPL(iommu_device_register); |
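
/*
 * Example (illustrative sketch only, not taken from an existing driver): an
 * IOMMU driver typically embeds a struct iommu_device in its private state
 * and registers it once the hardware instance has been probed.  The "my_*"
 * names below are hypothetical.
 *
 *	struct my_iommu {
 *		struct iommu_device iommu;
 *		void __iomem *regs;
 *	};
 *
 *	static int my_iommu_probe(struct platform_device *pdev)
 *	{
 *		struct my_iommu *m;
 *
 *		m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
 *		if (!m)
 *			return -ENOMEM;
 *
 *		return iommu_device_register(&m->iommu, &my_iommu_ops,
 *					     &pdev->dev);
 *	}
 */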
284 | |
285 | void iommu_device_unregister(struct iommu_device *iommu) |
286 | { |
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
293 | } |
294 | EXPORT_SYMBOL_GPL(iommu_device_unregister); |
295 | |
296 | #if IS_ENABLED(CONFIG_IOMMUFD_TEST) |
297 | void iommu_device_unregister_bus(struct iommu_device *iommu, |
298 | struct bus_type *bus, |
299 | struct notifier_block *nb) |
300 | { |
301 | bus_unregister_notifier(bus, nb); |
302 | iommu_device_unregister(iommu); |
303 | } |
304 | EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); |
305 | |
306 | /* |
307 | * Register an iommu driver against a single bus. This is only used by iommufd |
308 | * selftest to create a mock iommu driver. The caller must provide |
309 | * some memory to hold a notifier_block. |
310 | */ |
311 | int iommu_device_register_bus(struct iommu_device *iommu, |
312 | const struct iommu_ops *ops, struct bus_type *bus, |
313 | struct notifier_block *nb) |
314 | { |
315 | int err; |
316 | |
317 | iommu->ops = ops; |
318 | nb->notifier_call = iommu_bus_notifier; |
319 | err = bus_register_notifier(bus, nb); |
320 | if (err) |
321 | return err; |
322 | |
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
326 | |
327 | bus->iommu_ops = ops; |
328 | err = bus_iommu_probe(bus); |
329 | if (err) { |
330 | iommu_device_unregister_bus(iommu, bus, nb); |
331 | return err; |
332 | } |
333 | return 0; |
334 | } |
335 | EXPORT_SYMBOL_GPL(iommu_device_register_bus); |
336 | #endif |
337 | |
338 | static struct dev_iommu *dev_iommu_get(struct device *dev) |
339 | { |
340 | struct dev_iommu *param = dev->iommu; |
341 | |
342 | if (param) |
343 | return param; |
344 | |
	param = kzalloc(sizeof(*param), GFP_KERNEL);
346 | if (!param) |
347 | return NULL; |
348 | |
349 | mutex_init(¶m->lock); |
350 | dev->iommu = param; |
351 | return param; |
352 | } |
353 | |
354 | static void dev_iommu_free(struct device *dev) |
355 | { |
356 | struct dev_iommu *param = dev->iommu; |
357 | |
358 | dev->iommu = NULL; |
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
364 | } |
365 | |
366 | static u32 dev_iommu_get_max_pasids(struct device *dev) |
367 | { |
368 | u32 max_pasids = 0, bits = 0; |
369 | int ret; |
370 | |
371 | if (dev_is_pci(dev)) { |
372 | ret = pci_max_pasids(to_pci_dev(dev)); |
373 | if (ret > 0) |
374 | max_pasids = ret; |
375 | } else { |
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
377 | if (!ret) |
378 | max_pasids = 1UL << bits; |
379 | } |
380 | |
381 | return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); |
382 | } |
383 | |
384 | /* |
385 | * Init the dev->iommu and dev->iommu_group in the struct device and get the |
386 | * driver probed |
387 | */ |
388 | static int iommu_init_device(struct device *dev, const struct iommu_ops *ops) |
389 | { |
390 | struct iommu_device *iommu_dev; |
391 | struct iommu_group *group; |
392 | int ret; |
393 | |
394 | if (!dev_iommu_get(dev)) |
395 | return -ENOMEM; |
396 | |
	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto err_module_put;
	}

	ret = iommu_device_link(iommu_dev, dev);
	if (ret)
		goto err_release;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		group = ERR_PTR(-EINVAL);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink;
	}
	dev->iommu_group = group;

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
	return 0;

err_unlink:
	iommu_device_unlink(iommu_dev, dev);
err_release:
	if (ops->release_device)
		ops->release_device(dev);
err_module_put:
	module_put(ops->owner);
err_free:
	dev_iommu_free(dev);
436 | return ret; |
437 | } |
438 | |
439 | static void iommu_deinit_device(struct device *dev) |
440 | { |
441 | struct iommu_group *group = dev->iommu_group; |
442 | const struct iommu_ops *ops = dev_iommu_ops(dev); |
443 | |
444 | lockdep_assert_held(&group->mutex); |
445 | |
	iommu_device_unlink(dev->iommu->iommu_dev, dev);
447 | |
448 | /* |
449 | * release_device() must stop using any attached domain on the device. |
	 * If there are still other devices in the group they are not affected
451 | * by this callback. |
452 | * |
453 | * The IOMMU driver must set the device to either an identity or |
454 | * blocking translation and stop using any domain pointer, as it is |
455 | * going to be freed. |
456 | */ |
457 | if (ops->release_device) |
458 | ops->release_device(dev); |
459 | |
460 | /* |
461 | * If this is the last driver to use the group then we must free the |
462 | * domains before we do the module_put(). |
463 | */ |
	if (list_empty(&group->devices)) {
		if (group->default_domain) {
			iommu_domain_free(group->default_domain);
			group->default_domain = NULL;
		}
		if (group->blocking_domain) {
			iommu_domain_free(group->blocking_domain);
471 | group->blocking_domain = NULL; |
472 | } |
473 | group->domain = NULL; |
474 | } |
475 | |
476 | /* Caller must put iommu_group */ |
477 | dev->iommu_group = NULL; |
	module_put(ops->owner);
479 | dev_iommu_free(dev); |
480 | } |
481 | |
482 | static int __iommu_probe_device(struct device *dev, struct list_head *group_list) |
483 | { |
484 | const struct iommu_ops *ops = dev->bus->iommu_ops; |
485 | struct iommu_group *group; |
486 | static DEFINE_MUTEX(iommu_probe_device_lock); |
487 | struct group_device *gdev; |
488 | int ret; |
489 | |
490 | if (!ops) |
491 | return -ENODEV; |
492 | /* |
493 | * Serialise to avoid races between IOMMU drivers registering in |
494 | * parallel and/or the "replay" calls from ACPI/OF code via client |
495 | * driver probe. Once the latter have been cleaned up we should |
496 | * probably be able to use device_lock() here to minimise the scope, |
497 | * but for now enforcing a simple global ordering is fine. |
498 | */ |
499 | mutex_lock(&iommu_probe_device_lock); |
500 | |
501 | /* Device is probed already if in a group */ |
502 | if (dev->iommu_group) { |
503 | ret = 0; |
504 | goto out_unlock; |
505 | } |
506 | |
507 | ret = iommu_init_device(dev, ops); |
508 | if (ret) |
509 | goto out_unlock; |
510 | |
511 | group = dev->iommu_group; |
512 | gdev = iommu_group_alloc_device(group, dev); |
513 | mutex_lock(&group->mutex); |
	if (IS_ERR(gdev)) {
		ret = PTR_ERR(gdev);
		goto err_put_group;
	}

	/*
	 * The gdev must be in the list before calling
	 * iommu_setup_default_domain()
	 */
	list_add_tail(&gdev->list, &group->devices);
	WARN_ON(group->default_domain && !group->domain);
	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);
	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain && !group_list) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain) {
		/*
		 * With a group_list argument we defer the default_domain setup
		 * to the caller by providing a de-duplicated list of groups
		 * that need further setup.
		 */
		if (list_empty(&group->entry))
			list_add_tail(&group->entry, group_list);
	}
	mutex_unlock(&group->mutex);
	mutex_unlock(&iommu_probe_device_lock);

	if (dev_is_pci(dev))
		iommu_dma_set_pci_32bit_workaround(dev);

	return 0;

err_remove_gdev:
	list_del(&gdev->list);
	__iommu_group_free_device(group, gdev);
err_put_group:
	iommu_deinit_device(dev);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
out_unlock:
	mutex_unlock(&iommu_probe_device_lock);
561 | |
562 | return ret; |
563 | } |
564 | |
565 | int iommu_probe_device(struct device *dev) |
566 | { |
567 | const struct iommu_ops *ops; |
568 | int ret; |
569 | |
570 | ret = __iommu_probe_device(dev, NULL); |
571 | if (ret) |
572 | return ret; |
573 | |
574 | ops = dev_iommu_ops(dev); |
575 | if (ops->probe_finalize) |
576 | ops->probe_finalize(dev); |
577 | |
578 | return 0; |
579 | } |
580 | |
581 | static void __iommu_group_free_device(struct iommu_group *group, |
582 | struct group_device *grp_dev) |
583 | { |
584 | struct device *dev = grp_dev->dev; |
585 | |
	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	/*
	 * If the group has become empty then ownership must have been
	 * released, and the current domain must be set back to NULL or
	 * the default domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	kfree(grp_dev->name);
	kfree(grp_dev);
602 | } |
603 | |
604 | /* Remove the iommu_group from the struct device. */ |
605 | static void __iommu_group_remove_device(struct device *dev) |
606 | { |
607 | struct iommu_group *group = dev->iommu_group; |
608 | struct group_device *device; |
609 | |
610 | mutex_lock(&group->mutex); |
611 | for_each_group_device(group, device) { |
612 | if (device->dev != dev) |
613 | continue; |
614 | |
		list_del(&device->list);
		__iommu_group_free_device(group, device);
		if (dev->iommu && dev->iommu->iommu_dev)
			iommu_deinit_device(dev);
		else
			dev->iommu_group = NULL;
		break;
	}
	mutex_unlock(&group->mutex);
624 | |
625 | /* |
626 | * Pairs with the get in iommu_init_device() or |
627 | * iommu_group_add_device() |
628 | */ |
629 | iommu_group_put(group); |
630 | } |
631 | |
632 | static void iommu_release_device(struct device *dev) |
633 | { |
634 | struct iommu_group *group = dev->iommu_group; |
635 | |
636 | if (group) |
637 | __iommu_group_remove_device(dev); |
638 | |
639 | /* Free any fwspec if no iommu_driver was ever attached */ |
640 | if (dev->iommu) |
641 | dev_iommu_free(dev); |
642 | } |
643 | |
644 | static int __init iommu_set_def_domain_type(char *str) |
645 | { |
646 | bool pt; |
647 | int ret; |
648 | |
	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
661 | |
static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);
671 | |
672 | void iommu_set_dma_strict(void) |
673 | { |
674 | iommu_dma_strict = true; |
675 | if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) |
676 | iommu_def_domain_type = IOMMU_DOMAIN_DMA; |
677 | } |
678 | |
679 | static ssize_t iommu_group_attr_show(struct kobject *kobj, |
680 | struct attribute *__attr, char *buf) |
681 | { |
682 | struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); |
683 | struct iommu_group *group = to_iommu_group(kobj); |
684 | ssize_t ret = -EIO; |
685 | |
686 | if (attr->show) |
687 | ret = attr->show(group, buf); |
688 | return ret; |
689 | } |
690 | |
691 | static ssize_t iommu_group_attr_store(struct kobject *kobj, |
692 | struct attribute *__attr, |
693 | const char *buf, size_t count) |
694 | { |
695 | struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); |
696 | struct iommu_group *group = to_iommu_group(kobj); |
697 | ssize_t ret = -EIO; |
698 | |
699 | if (attr->store) |
700 | ret = attr->store(group, buf, count); |
701 | return ret; |
702 | } |
703 | |
704 | static const struct sysfs_ops iommu_group_sysfs_ops = { |
705 | .show = iommu_group_attr_show, |
706 | .store = iommu_group_attr_store, |
707 | }; |
708 | |
709 | static int iommu_group_create_file(struct iommu_group *group, |
710 | struct iommu_group_attribute *attr) |
711 | { |
	return sysfs_create_file(&group->kobj, &attr->attr);
713 | } |
714 | |
715 | static void iommu_group_remove_file(struct iommu_group *group, |
716 | struct iommu_group_attribute *attr) |
717 | { |
	sysfs_remove_file(&group->kobj, &attr->attr);
719 | } |
720 | |
721 | static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) |
722 | { |
	return sysfs_emit(buf, "%s\n", group->name);
724 | } |
725 | |
726 | /** |
727 | * iommu_insert_resv_region - Insert a new region in the |
728 | * list of reserved regions. |
729 | * @new: new region to insert |
730 | * @regions: list of regions |
731 | * |
732 | * Elements are sorted by start address and overlapping segments |
733 | * of the same type are merged. |
734 | */ |
735 | static int iommu_insert_resv_region(struct iommu_resv_region *new, |
736 | struct list_head *regions) |
737 | { |
738 | struct iommu_resv_region *iter, *tmp, *nr, *top; |
739 | LIST_HEAD(stack); |
740 | |
	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
743 | if (!nr) |
744 | return -ENOMEM; |
745 | |
746 | /* First add the new element based on start address sorting */ |
747 | list_for_each_entry(iter, regions, list) { |
748 | if (nr->start < iter->start || |
749 | (nr->start == iter->start && nr->type <= iter->type)) |
750 | break; |
751 | } |
	list_add_tail(&nr->list, &iter->list);
753 | |
754 | /* Merge overlapping segments of type nr->type in @regions, if any */ |
755 | list_for_each_entry_safe(iter, tmp, regions, list) { |
756 | phys_addr_t top_end, iter_end = iter->start + iter->length - 1; |
757 | |
758 | /* no merge needed on elements of different types than @new */ |
759 | if (iter->type != new->type) { |
			list_move_tail(&iter->list, &stack);
761 | continue; |
762 | } |
763 | |
764 | /* look for the last stack element of same type as @iter */ |
765 | list_for_each_entry_reverse(top, &stack, list) |
766 | if (top->type == iter->type) |
767 | goto check_overlap; |
768 | |
		list_move_tail(&iter->list, &stack);
770 | continue; |
771 | |
772 | check_overlap: |
773 | top_end = top->start + top->length - 1; |
774 | |
		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
784 | return 0; |
785 | } |
786 | |
787 | static int |
788 | iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, |
789 | struct list_head *group_resv_regions) |
790 | { |
791 | struct iommu_resv_region *entry; |
792 | int ret = 0; |
793 | |
794 | list_for_each_entry(entry, dev_resv_regions, list) { |
		ret = iommu_insert_resv_region(entry, group_resv_regions);
796 | if (ret) |
797 | break; |
798 | } |
799 | return ret; |
800 | } |
801 | |
802 | int iommu_get_group_resv_regions(struct iommu_group *group, |
803 | struct list_head *head) |
804 | { |
805 | struct group_device *device; |
806 | int ret = 0; |
807 | |
808 | mutex_lock(&group->mutex); |
809 | for_each_group_device(group, device) { |
810 | struct list_head dev_resv_regions; |
811 | |
812 | /* |
813 | * Non-API groups still expose reserved_regions in sysfs, |
814 | * so filter out calls that get here that way. |
815 | */ |
816 | if (!device->dev->iommu) |
817 | break; |
818 | |
		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
827 | return ret; |
828 | } |
829 | EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); |
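
/*
 * Example (illustrative sketch only): callers collect the merged reserved
 * regions of a group on a local list and free each entry when done, as the
 * sysfs handler below does.
 *
 *	struct iommu_resv_region *region, *next;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry_safe(region, next, &resv_regions, list) {
 *		...
 *		kfree(region);
 *	}
 */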
830 | |
831 | static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, |
832 | char *buf) |
833 | { |
834 | struct iommu_resv_region *region, *next; |
835 | struct list_head group_resv_regions; |
836 | int offset = 0; |
837 | |
	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
848 | } |
849 | |
850 | return offset; |
851 | } |
852 | |
853 | static ssize_t iommu_group_show_type(struct iommu_group *group, |
854 | char *buf) |
855 | { |
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
881 | } |
882 | |
883 | static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); |
884 | |
885 | static IOMMU_GROUP_ATTR(reserved_regions, 0444, |
886 | iommu_group_show_resv_regions, NULL); |
887 | |
888 | static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, |
889 | iommu_group_store_type); |
890 | |
891 | static void iommu_group_release(struct kobject *kobj) |
892 | { |
893 | struct iommu_group *group = to_iommu_group(kobj); |
894 | |
	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	/* Domains are free'd by iommu_deinit_device() */
	WARN_ON(group->default_domain);
	WARN_ON(group->blocking_domain);

	kfree(group->name);
	kfree(group);
908 | } |
909 | |
910 | static const struct kobj_type iommu_group_ktype = { |
911 | .sysfs_ops = &iommu_group_sysfs_ops, |
912 | .release = iommu_group_release, |
913 | }; |
914 | |
915 | /** |
916 | * iommu_group_alloc - Allocate a new group |
917 | * |
918 | * This function is called by an iommu driver to allocate a new iommu |
919 | * group. The iommu group represents the minimum granularity of the iommu. |
920 | * Upon successful return, the caller holds a reference to the supplied |
921 | * group in order to hold the group until devices are added. Use |
922 | * iommu_group_put() to release this extra reference count, allowing the |
923 | * group to be automatically reclaimed once it has no devices or external |
924 | * references. |
925 | */ |
926 | struct iommu_group *iommu_group_alloc(void) |
927 | { |
928 | struct iommu_group *group; |
929 | int ret; |
930 | |
	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);
982 | |
983 | return group; |
984 | } |
985 | EXPORT_SYMBOL_GPL(iommu_group_alloc); |
986 | |
987 | /** |
988 | * iommu_group_get_iommudata - retrieve iommu_data registered for a group |
989 | * @group: the group |
990 | * |
991 | * iommu drivers can store data in the group for use when doing iommu |
992 | * operations. This function provides a way to retrieve it. Caller |
993 | * should hold a group reference. |
994 | */ |
995 | void *iommu_group_get_iommudata(struct iommu_group *group) |
996 | { |
997 | return group->iommu_data; |
998 | } |
999 | EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); |
1000 | |
1001 | /** |
1002 | * iommu_group_set_iommudata - set iommu_data for a group |
1003 | * @group: the group |
1004 | * @iommu_data: new data |
1005 | * @release: release function for iommu_data |
1006 | * |
1007 | * iommu drivers can store data in the group for use when doing iommu |
1008 | * operations. This function provides a way to set the data after |
1009 | * the group has been allocated. Caller should hold a group reference. |
1010 | */ |
1011 | void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, |
1012 | void (*release)(void *iommu_data)) |
1013 | { |
1014 | group->iommu_data = iommu_data; |
1015 | group->iommu_data_release = release; |
1016 | } |
1017 | EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); |
1018 | |
1019 | /** |
1020 | * iommu_group_set_name - set name for a group |
1021 | * @group: the group |
1022 | * @name: name |
1023 | * |
1024 | * Allow iommu driver to set a name for a group. When set it will |
1025 | * appear in a name attribute file under the group in sysfs. |
1026 | */ |
1027 | int iommu_group_set_name(struct iommu_group *group, const char *name) |
1028 | { |
1029 | int ret; |
1030 | |
1031 | if (group->name) { |
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
1046 | group->name = NULL; |
1047 | return ret; |
1048 | } |
1049 | |
1050 | return 0; |
1051 | } |
1052 | EXPORT_SYMBOL_GPL(iommu_group_set_name); |
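
/*
 * Example (illustrative sketch only): an IOMMU driver that allocates its own
 * groups can name them and attach private data right after allocation.  The
 * "my_*" names are hypothetical.
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *
 *	if (IS_ERR(group))
 *		return group;
 *	if (iommu_group_set_name(group, "my-group")) {
 *		iommu_group_put(group);
 *		return ERR_PTR(-ENOMEM);
 *	}
 *	iommu_group_set_iommudata(group, my_data, my_data_release);
 *	return group;
 */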
1053 | |
1054 | static int iommu_create_device_direct_mappings(struct iommu_domain *domain, |
1055 | struct device *dev) |
1056 | { |
1057 | struct iommu_resv_region *entry; |
1058 | struct list_head mappings; |
1059 | unsigned long pg_size; |
1060 | int ret = 0; |
1061 | |
1062 | pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; |
	INIT_LIST_HEAD(&mappings);
1064 | |
1065 | if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) |
1066 | return -EINVAL; |
1067 | |
	iommu_get_resv_regions(dev, &mappings);
1069 | |
1070 | /* We need to consider overlapping regions for different devices */ |
1071 | list_for_each_entry(entry, &mappings, list) { |
1072 | dma_addr_t start, end, addr; |
1073 | size_t map_size = 0; |
1074 | |
1075 | if (entry->type == IOMMU_RESV_DIRECT) |
1076 | dev->iommu->require_direct = 1; |
1077 | |
1078 | if ((entry->type != IOMMU_RESV_DIRECT && |
1079 | entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || |
1080 | !iommu_is_dma_domain(domain)) |
1081 | continue; |
1082 | |
1083 | start = ALIGN(entry->start, pg_size); |
1084 | end = ALIGN(entry->start + entry->length, pg_size); |
1085 | |
1086 | for (addr = start; addr <= end; addr += pg_size) { |
1087 | phys_addr_t phys_addr; |
1088 | |
1089 | if (addr == end) |
1090 | goto map_end; |
1091 | |
			phys_addr = iommu_iova_to_phys(domain, addr);
1093 | if (!phys_addr) { |
1094 | map_size += pg_size; |
1095 | continue; |
1096 | } |
1097 | |
1098 | map_end: |
1099 | if (map_size) { |
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
1103 | if (ret) |
1104 | goto out; |
1105 | map_size = 0; |
1106 | } |
1107 | } |
1108 | |
1109 | } |
1110 | |
	if (!list_empty(&mappings) && iommu_is_dma_domain(domain))
		iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);
1116 | |
1117 | return ret; |
1118 | } |
1119 | |
1120 | /* This is undone by __iommu_group_free_device() */ |
1121 | static struct group_device *iommu_group_alloc_device(struct iommu_group *group, |
1122 | struct device *dev) |
1123 | { |
1124 | int ret, i = 0; |
1125 | struct group_device *device; |
1126 | |
	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return device;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ERR_PTR(ret);
1174 | } |
1175 | |
1176 | /** |
1177 | * iommu_group_add_device - add a device to an iommu group |
1178 | * @group: the group into which to add the device (reference should be held) |
1179 | * @dev: the device |
1180 | * |
1181 | * This function is called by an iommu driver to add a device into a |
1182 | * group. Adding a device increments the group reference count. |
1183 | */ |
1184 | int iommu_group_add_device(struct iommu_group *group, struct device *dev) |
1185 | { |
1186 | struct group_device *gdev; |
1187 | |
1188 | gdev = iommu_group_alloc_device(group, dev); |
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&gdev->list, &group->devices);
	mutex_unlock(&group->mutex);
1198 | return 0; |
1199 | } |
1200 | EXPORT_SYMBOL_GPL(iommu_group_add_device); |
1201 | |
/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
1209 | void iommu_group_remove_device(struct device *dev) |
1210 | { |
1211 | struct iommu_group *group = dev->iommu_group; |
1212 | |
1213 | if (!group) |
1214 | return; |
1215 | |
	dev_info(dev, "Removing from iommu group %d\n", group->id);
1217 | |
1218 | __iommu_group_remove_device(dev); |
1219 | } |
1220 | EXPORT_SYMBOL_GPL(iommu_group_remove_device); |
1221 | |
1222 | /** |
1223 | * iommu_group_for_each_dev - iterate over each device in the group |
1224 | * @group: the group |
1225 | * @data: caller opaque data to be passed to callback function |
1226 | * @fn: caller supplied callback function |
1227 | * |
1228 | * This function is called by group users to iterate over group devices. |
1229 | * Callers should hold a reference count to the group during callback. |
1230 | * The group->mutex is held across callbacks, which will block calls to |
1231 | * iommu_group_add/remove_device. |
1232 | */ |
1233 | int iommu_group_for_each_dev(struct iommu_group *group, void *data, |
1234 | int (*fn)(struct device *, void *)) |
1235 | { |
1236 | struct group_device *device; |
1237 | int ret = 0; |
1238 | |
1239 | mutex_lock(&group->mutex); |
1240 | for_each_group_device(group, device) { |
1241 | ret = fn(device->dev, data); |
1242 | if (ret) |
1243 | break; |
1244 | } |
	mutex_unlock(&group->mutex);
1246 | |
1247 | return ret; |
1248 | } |
1249 | EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); |
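
/*
 * Example (illustrative sketch only): counting the devices in a group with
 * iommu_group_for_each_dev().  The callback name is hypothetical.
 *
 *	static int my_count_device(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, my_count_device);
 */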
1250 | |
1251 | /** |
1252 | * iommu_group_get - Return the group for a device and increment reference |
1253 | * @dev: get the group that this device belongs to |
1254 | * |
1255 | * This function is called by iommu drivers and users to get the group |
1256 | * for the specified device. If found, the group is returned and the group |
 * reference is incremented, else NULL.
1258 | */ |
1259 | struct iommu_group *iommu_group_get(struct device *dev) |
1260 | { |
1261 | struct iommu_group *group = dev->iommu_group; |
1262 | |
1263 | if (group) |
		kobject_get(group->devices_kobj);
1265 | |
1266 | return group; |
1267 | } |
1268 | EXPORT_SYMBOL_GPL(iommu_group_get); |
1269 | |
1270 | /** |
1271 | * iommu_group_ref_get - Increment reference on a group |
1272 | * @group: the group to use, must not be NULL |
1273 | * |
1274 | * This function is called by iommu drivers to take additional references on an |
1275 | * existing group. Returns the given group for convenience. |
1276 | */ |
1277 | struct iommu_group *iommu_group_ref_get(struct iommu_group *group) |
1278 | { |
	kobject_get(group->devices_kobj);
1280 | return group; |
1281 | } |
1282 | EXPORT_SYMBOL_GPL(iommu_group_ref_get); |
1283 | |
1284 | /** |
1285 | * iommu_group_put - Decrement group reference |
1286 | * @group: the group to use |
1287 | * |
1288 | * This function is called by iommu drivers and users to release the |
1289 | * iommu group. Once the reference count is zero, the group is released. |
1290 | */ |
1291 | void iommu_group_put(struct iommu_group *group) |
1292 | { |
1293 | if (group) |
		kobject_put(group->devices_kobj);
1295 | } |
1296 | EXPORT_SYMBOL_GPL(iommu_group_put); |
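
/*
 * Example (illustrative sketch only): iommu_group_get() and iommu_group_put()
 * are paired around any use of the group.
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		pr_info("device is in iommu group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */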
1297 | |
1298 | /** |
1299 | * iommu_register_device_fault_handler() - Register a device fault handler |
1300 | * @dev: the device |
1301 | * @handler: the fault handler |
1302 | * @data: private data passed as argument to the handler |
1303 | * |
1304 | * When an IOMMU fault event is received, this handler gets called with the |
1305 | * fault event and data as argument. The handler should return 0 on success. If |
1306 | * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also |
1307 | * complete the fault by calling iommu_page_response() with one of the following |
 * response codes:
1309 | * - IOMMU_PAGE_RESP_SUCCESS: retry the translation |
1310 | * - IOMMU_PAGE_RESP_INVALID: terminate the fault |
1311 | * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting |
1312 | * page faults if possible. |
1313 | * |
1314 | * Return 0 if the fault handler was installed successfully, or an error. |
1315 | */ |
1316 | int iommu_register_device_fault_handler(struct device *dev, |
1317 | iommu_dev_fault_handler_t handler, |
1318 | void *data) |
1319 | { |
1320 | struct dev_iommu *param = dev->iommu; |
1321 | int ret = 0; |
1322 | |
1323 | if (!param) |
1324 | return -EINVAL; |
1325 | |
1326 | mutex_lock(¶m->lock); |
1327 | /* Only allow one fault handler registered for each device */ |
1328 | if (param->fault_param) { |
1329 | ret = -EBUSY; |
1330 | goto done_unlock; |
1331 | } |
1332 | |
1333 | get_device(dev); |
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1335 | if (!param->fault_param) { |
1336 | put_device(dev); |
1337 | ret = -ENOMEM; |
1338 | goto done_unlock; |
1339 | } |
1340 | param->fault_param->handler = handler; |
1341 | param->fault_param->data = data; |
1342 | mutex_init(¶m->fault_param->lock); |
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);
1347 | |
1348 | return ret; |
1349 | } |
1350 | EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); |
1351 | |
1352 | /** |
1353 | * iommu_unregister_device_fault_handler() - Unregister the device fault handler |
1354 | * @dev: the device |
1355 | * |
1356 | * Remove the device fault handler installed with |
1357 | * iommu_register_device_fault_handler(). |
1358 | * |
1359 | * Return 0 on success, or an error. |
1360 | */ |
1361 | int iommu_unregister_device_fault_handler(struct device *dev) |
1362 | { |
1363 | struct dev_iommu *param = dev->iommu; |
1364 | int ret = 0; |
1365 | |
1366 | if (!param) |
1367 | return -EINVAL; |
1368 | |
1369 | mutex_lock(¶m->lock); |
1370 | |
1371 | if (!param->fault_param) |
1372 | goto unlock; |
1373 | |
1374 | /* we cannot unregister handler if there are pending faults */ |
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);
1385 | |
1386 | return ret; |
1387 | } |
1388 | EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); |
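
/*
 * Example (illustrative sketch only): a consumer of recoverable faults
 * registers a handler at probe time and removes it on teardown.  The handler
 * typically queues the fault and later completes it with
 * iommu_page_response().  The "my_*" names are hypothetical.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct my_consumer *con = data;
 *
 *		return my_queue_fault(con, fault);
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler, con);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */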
1389 | |
1390 | /** |
1391 | * iommu_report_device_fault() - Report fault event to device driver |
1392 | * @dev: the device |
1393 | * @evt: fault event data |
1394 | * |
1395 | * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ |
1396 | * handler. When this function fails and the fault is recoverable, it is the |
1397 | * caller's responsibility to complete the fault. |
1398 | * |
1399 | * Return 0 on success, or an error. |
1400 | */ |
1401 | int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) |
1402 | { |
1403 | struct dev_iommu *param = dev->iommu; |
1404 | struct iommu_fault_event *evt_pending = NULL; |
1405 | struct iommu_fault_param *fparam; |
1406 | int ret = 0; |
1407 | |
1408 | if (!param || !evt) |
1409 | return -EINVAL; |
1410 | |
1411 | /* we only report device fault if there is a handler registered */ |
1412 | mutex_lock(¶m->lock); |
1413 | fparam = param->fault_param; |
1414 | if (!fparam || !fparam->handler) { |
1415 | ret = -EINVAL; |
1416 | goto done_unlock; |
1417 | } |
1418 | |
1419 | if (evt->fault.type == IOMMU_FAULT_PAGE_REQ && |
1420 | (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { |
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
1441 | return ret; |
1442 | } |
1443 | EXPORT_SYMBOL_GPL(iommu_report_device_fault); |
1444 | |
1445 | int iommu_page_response(struct device *dev, |
1446 | struct iommu_page_response *msg) |
1447 | { |
1448 | bool needs_pasid; |
1449 | int ret = -EINVAL; |
1450 | struct iommu_fault_event *evt; |
1451 | struct iommu_fault_page_request *prm; |
1452 | struct dev_iommu *param = dev->iommu; |
1453 | const struct iommu_ops *ops = dev_iommu_ops(dev); |
1454 | bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; |
1455 | |
1456 | if (!ops->page_response) |
1457 | return -ENODEV; |
1458 | |
1459 | if (!param || !param->fault_param) |
1460 | return -EINVAL; |
1461 | |
1462 | if (msg->version != IOMMU_PAGE_RESP_VERSION_1 || |
1463 | msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID) |
1464 | return -EINVAL; |
1465 | |
1466 | /* Only send response if there is a fault report pending */ |
1467 | mutex_lock(¶m->fault_param->lock); |
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1470 | goto done_unlock; |
1471 | } |
1472 | /* |
1473 | * Check if we have a matching page request pending to respond, |
1474 | * otherwise return -EINVAL |
1475 | */ |
1476 | list_for_each_entry(evt, ¶m->fault_param->faults, list) { |
1477 | prm = &evt->fault.prm; |
1478 | if (prm->grpid != msg->grpid) |
1479 | continue; |
1480 | |
1481 | /* |
1482 | * If the PASID is required, the corresponding request is |
1483 | * matched using the group ID, the PASID valid bit and the PASID |
1484 | * value. Otherwise only the group ID matches request and |
1485 | * response. |
1486 | */ |
1487 | needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; |
1488 | if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) |
1489 | continue; |
1490 | |
1491 | if (!needs_pasid && has_pasid) { |
1492 | /* No big deal, just clear it. */ |
1493 | msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; |
1494 | msg->pasid = 0; |
1495 | } |
1496 | |
1497 | ret = ops->page_response(dev, evt, msg); |
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
1505 | return ret; |
1506 | } |
1507 | EXPORT_SYMBOL_GPL(iommu_page_response); |
1508 | |
1509 | /** |
1510 | * iommu_group_id - Return ID for a group |
1511 | * @group: the group to ID |
1512 | * |
1513 | * Return the unique ID for the group matching the sysfs group number. |
1514 | */ |
1515 | int iommu_group_id(struct iommu_group *group) |
1516 | { |
1517 | return group->id; |
1518 | } |
1519 | EXPORT_SYMBOL_GPL(iommu_group_id); |
1520 | |
1521 | static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, |
1522 | unsigned long *devfns); |
1523 | |
1524 | /* |
1525 | * To consider a PCI device isolated, we require ACS to support Source |
1526 | * Validation, Request Redirection, Completer Redirection, and Upstream |
1527 | * Forwarding. This effectively means that devices cannot spoof their |
1528 | * requester ID, requests and completions cannot be redirected, and all |
1529 | * transactions are forwarded upstream, even as it passes through a |
1530 | * bridge where the target device is downstream. |
1531 | */ |
1532 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) |
1533 | |
1534 | /* |
1535 | * For multifunction devices which are not isolated from each other, find |
1536 | * all the other non-isolated functions and look for existing groups. For |
1537 | * each function, we also need to look for aliases to or from other devices |
1538 | * that may already have a group. |
1539 | */ |
1540 | static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, |
1541 | unsigned long *devfns) |
1542 | { |
1543 | struct pci_dev *tmp = NULL; |
1544 | struct iommu_group *group; |
1545 | |
1546 | if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) |
1547 | return NULL; |
1548 | |
1549 | for_each_pci_dev(tmp) { |
1550 | if (tmp == pdev || tmp->bus != pdev->bus || |
1551 | PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || |
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
1558 | return group; |
1559 | } |
1560 | } |
1561 | |
1562 | return NULL; |
1563 | } |
1564 | |
1565 | /* |
1566 | * Look for aliases to or from the given device for existing groups. DMA |
1567 | * aliases are only supported on the same bus, therefore the search |
1568 | * space is quite small (especially since we're really only looking at pcie |
1569 | * device, and therefore only expect multiple slots on the root complex or |
1570 | * downstream switch ports). It's conceivable though that a pair of |
1571 | * multifunction devices could have aliases between them that would cause a |
1572 | * loop. To prevent this, we use a bitmap to track where we've been. |
1573 | */ |
1574 | static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, |
1575 | unsigned long *devfns) |
1576 | { |
1577 | struct pci_dev *tmp = NULL; |
1578 | struct iommu_group *group; |
1579 | |
	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1581 | return NULL; |
1582 | |
1583 | group = iommu_group_get(&pdev->dev); |
1584 | if (group) |
1585 | return group; |
1586 | |
1587 | for_each_pci_dev(tmp) { |
1588 | if (tmp == pdev || tmp->bus != pdev->bus) |
1589 | continue; |
1590 | |
1591 | /* We alias them or they alias us */ |
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
1602 | return group; |
1603 | } |
1604 | } |
1605 | } |
1606 | |
1607 | return NULL; |
1608 | } |
1609 | |
1610 | struct group_for_pci_data { |
1611 | struct pci_dev *pdev; |
1612 | struct iommu_group *group; |
1613 | }; |
1614 | |
1615 | /* |
1616 | * DMA alias iterator callback, return the last seen device. Stop and return |
1617 | * the IOMMU group if we find one along the way. |
1618 | */ |
1619 | static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) |
1620 | { |
1621 | struct group_for_pci_data *data = opaque; |
1622 | |
1623 | data->pdev = pdev; |
1624 | data->group = iommu_group_get(&pdev->dev); |
1625 | |
1626 | return data->group != NULL; |
1627 | } |
1628 | |
1629 | /* |
1630 | * Generic device_group call-back function. It just allocates one |
1631 | * iommu-group per device. |
1632 | */ |
1633 | struct iommu_group *generic_device_group(struct device *dev) |
1634 | { |
1635 | return iommu_group_alloc(); |
1636 | } |
1637 | EXPORT_SYMBOL_GPL(generic_device_group); |
1638 | |
1639 | /* |
1640 | * Use standard PCI bus topology, isolation features, and DMA alias quirks |
1641 | * to find or create an IOMMU group for a device. |
1642 | */ |
1643 | struct iommu_group *pci_device_group(struct device *dev) |
1644 | { |
1645 | struct pci_dev *pdev = to_pci_dev(dev); |
1646 | struct group_for_pci_data data; |
1647 | struct pci_bus *bus; |
1648 | struct iommu_group *group = NULL; |
1649 | u64 devfns[4] = { 0 }; |
1650 | |
1651 | if (WARN_ON(!dev_is_pci(dev))) |
		return ERR_PTR(-EINVAL);
1653 | |
1654 | /* |
1655 | * Find the upstream DMA alias for the device. A device must not |
1656 | * be aliased due to topology in order to have its own IOMMU group. |
1657 | * If we find an alias along the way that already belongs to a |
1658 | * group, use it. |
1659 | */ |
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1661 | return data.group; |
1662 | |
1663 | pdev = data.pdev; |
1664 | |
1665 | /* |
1666 | * Continue upstream from the point of minimum IOMMU granularity |
1667 | * due to aliases to the point where devices are protected from |
1668 | * peer-to-peer DMA by PCI ACS. Again, if we find an existing |
1669 | * group, use it. |
1670 | */ |
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1676 | break; |
1677 | |
1678 | pdev = bus->self; |
1679 | |
1680 | group = iommu_group_get(&pdev->dev); |
1681 | if (group) |
1682 | return group; |
1683 | } |
1684 | |
1685 | /* |
1686 | * Look for existing groups on device aliases. If we alias another |
1687 | * device or another device aliases us, use the same group. |
1688 | */ |
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1690 | if (group) |
1691 | return group; |
1692 | |
1693 | /* |
1694 | * Look for existing groups on non-isolated functions on the same |
	 * slot and aliases of those functions, if any. No need to clear
1696 | * the search bitmap, the tested devfns are still valid. |
1697 | */ |
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1699 | if (group) |
1700 | return group; |
1701 | |
1702 | /* No shared group found, allocate new */ |
1703 | return iommu_group_alloc(); |
1704 | } |
1705 | EXPORT_SYMBOL_GPL(pci_device_group); |
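
/*
 * Illustrative sketch only (not part of this file): an IOMMU driver that
 * handles PCI devices typically wires pci_device_group() into its
 * ops->device_group callback rather than open-coding the topology walk.
 * "my_device_group" and "my_iommu_ops" are hypothetical names.
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.device_group	= my_device_group,
 *	};
 */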
1706 | |
1707 | /* Get the IOMMU group for device on fsl-mc bus */ |
1708 | struct iommu_group *fsl_mc_device_group(struct device *dev) |
1709 | { |
1710 | struct device *cont_dev = fsl_mc_cont_dev(dev); |
1711 | struct iommu_group *group; |
1712 | |
1713 | group = iommu_group_get(cont_dev); |
1714 | if (!group) |
1715 | group = iommu_group_alloc(); |
1716 | return group; |
1717 | } |
1718 | EXPORT_SYMBOL_GPL(fsl_mc_device_group); |
1719 | |
1720 | static int iommu_get_def_domain_type(struct device *dev) |
1721 | { |
1722 | const struct iommu_ops *ops = dev_iommu_ops(dev); |
1723 | |
1724 | if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) |
1725 | return IOMMU_DOMAIN_DMA; |
1726 | |
1727 | if (ops->def_domain_type) |
1728 | return ops->def_domain_type(dev); |
1729 | |
1730 | return 0; |
1731 | } |
1732 | |
1733 | static struct iommu_domain * |
1734 | __iommu_group_alloc_default_domain(const struct bus_type *bus, |
1735 | struct iommu_group *group, int req_type) |
1736 | { |
1737 | if (group->default_domain && group->default_domain->type == req_type) |
1738 | return group->default_domain; |
	return __iommu_domain_alloc(bus, req_type);
1740 | } |
1741 | |
1742 | /* |
1743 | * req_type of 0 means "auto" which means to select a domain based on |
1744 | * iommu_def_domain_type or what the driver actually supports. |
1745 | */ |
1746 | static struct iommu_domain * |
1747 | iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) |
1748 | { |
1749 | const struct bus_type *bus = |
1750 | list_first_entry(&group->devices, struct group_device, list) |
1751 | ->dev->bus; |
1752 | struct iommu_domain *dom; |
1753 | |
1754 | lockdep_assert_held(&group->mutex); |
1755 | |
1756 | if (req_type) |
1757 | return __iommu_group_alloc_default_domain(bus, group, req_type); |
1758 | |
1759 | /* The driver gave no guidance on what type to use, try the default */ |
	dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
1761 | if (dom) |
1762 | return dom; |
1763 | |
1764 | /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ |
1765 | if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) |
1766 | return NULL; |
1767 | dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA); |
1768 | if (!dom) |
1769 | return NULL; |
1770 | |
1771 | pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA" , |
1772 | iommu_def_domain_type, group->name); |
1773 | return dom; |
1774 | } |
1775 | |
1776 | struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) |
1777 | { |
1778 | return group->default_domain; |
1779 | } |
1780 | |
1781 | static int probe_iommu_group(struct device *dev, void *data) |
1782 | { |
1783 | struct list_head *group_list = data; |
1784 | int ret; |
1785 | |
1786 | ret = __iommu_probe_device(dev, group_list); |
1787 | if (ret == -ENODEV) |
1788 | ret = 0; |
1789 | |
1790 | return ret; |
1791 | } |
1792 | |
1793 | static int iommu_bus_notifier(struct notifier_block *nb, |
1794 | unsigned long action, void *data) |
1795 | { |
1796 | struct device *dev = data; |
1797 | |
1798 | if (action == BUS_NOTIFY_ADD_DEVICE) { |
1799 | int ret; |
1800 | |
1801 | ret = iommu_probe_device(dev); |
1802 | return (ret) ? NOTIFY_DONE : NOTIFY_OK; |
1803 | } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { |
1804 | iommu_release_device(dev); |
1805 | return NOTIFY_OK; |
1806 | } |
1807 | |
1808 | return 0; |
1809 | } |
1810 | |
1811 | /* A target_type of 0 will select the best domain type and cannot fail */ |
1812 | static int iommu_get_default_domain_type(struct iommu_group *group, |
1813 | int target_type) |
1814 | { |
1815 | int best_type = target_type; |
1816 | struct group_device *gdev; |
1817 | struct device *last_dev; |
1818 | |
1819 | lockdep_assert_held(&group->mutex); |
1820 | |
1821 | for_each_group_device(group, gdev) { |
		unsigned int type = iommu_get_def_domain_type(gdev->dev);
1823 | |
1824 | if (best_type && type && best_type != type) { |
1825 | if (target_type) { |
1826 | dev_err_ratelimited( |
1827 | gdev->dev, |
1828 | "Device cannot be in %s domain\n" , |
1829 | iommu_domain_type_str(target_type)); |
1830 | return -1; |
1831 | } |
1832 | |
1833 | dev_warn( |
1834 | gdev->dev, |
1835 | "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n" , |
1836 | iommu_domain_type_str(type), dev_name(last_dev), |
1837 | iommu_domain_type_str(best_type)); |
1838 | return 0; |
1839 | } |
1840 | if (!best_type) |
1841 | best_type = type; |
1842 | last_dev = gdev->dev; |
1843 | } |
1844 | return best_type; |
1845 | } |
1846 | |
1847 | static void iommu_group_do_probe_finalize(struct device *dev) |
1848 | { |
1849 | const struct iommu_ops *ops = dev_iommu_ops(dev); |
1850 | |
1851 | if (ops->probe_finalize) |
1852 | ops->probe_finalize(dev); |
1853 | } |
1854 | |
1855 | int bus_iommu_probe(const struct bus_type *bus) |
1856 | { |
1857 | struct iommu_group *group, *next; |
1858 | LIST_HEAD(group_list); |
1859 | int ret; |
1860 | |
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1862 | if (ret) |
1863 | return ret; |
1864 | |
1865 | list_for_each_entry_safe(group, next, &group_list, entry) { |
1866 | struct group_device *gdev; |
1867 | |
1868 | mutex_lock(&group->mutex); |
1869 | |
1870 | /* Remove item from the list */ |
		list_del_init(&group->entry);
1872 | |
1873 | /* |
1874 | * We go to the trouble of deferred default domain creation so |
1875 | * that the cross-group default domain type and the setup of the |
		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
		 */
		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		mutex_unlock(&group->mutex);
1884 | |
1885 | /* |
1886 | * FIXME: Mis-locked because the ops->probe_finalize() call-back |
1887 | * of some IOMMU drivers calls arm_iommu_attach_device() which |
1888 | * in-turn might call back into IOMMU core code, where it tries |
1889 | * to take group->mutex, resulting in a deadlock. |
1890 | */ |
1891 | for_each_group_device(group, gdev) |
			iommu_group_do_probe_finalize(gdev->dev);
1893 | } |
1894 | |
1895 | return 0; |
1896 | } |
1897 | |
1898 | bool iommu_present(const struct bus_type *bus) |
1899 | { |
1900 | return bus->iommu_ops != NULL; |
1901 | } |
1902 | EXPORT_SYMBOL_GPL(iommu_present); |
1903 | |
1904 | /** |
1905 | * device_iommu_capable() - check for a general IOMMU capability |
1906 | * @dev: device to which the capability would be relevant, if available |
1907 | * @cap: IOMMU capability |
1908 | * |
1909 | * Return: true if an IOMMU is present and supports the given capability |
1910 | * for the given device, otherwise false. |
1911 | */ |
1912 | bool device_iommu_capable(struct device *dev, enum iommu_cap cap) |
1913 | { |
1914 | const struct iommu_ops *ops; |
1915 | |
1916 | if (!dev->iommu || !dev->iommu->iommu_dev) |
1917 | return false; |
1918 | |
1919 | ops = dev_iommu_ops(dev); |
1920 | if (!ops->capable) |
1921 | return false; |
1922 | |
1923 | return ops->capable(dev, cap); |
1924 | } |
1925 | EXPORT_SYMBOL_GPL(device_iommu_capable); |
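
/*
 * Illustrative sketch only: a caller such as a VFIO-style driver might use
 * this to decide whether DMA mappings for the device can be made cache
 * coherent. "my_dev" and "prot" are placeholders.
 *
 *	if (device_iommu_capable(my_dev, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 */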
1926 | |
1927 | /** |
1928 | * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() |
1929 | * for a group |
1930 | * @group: Group to query |
1931 | * |
1932 | * IOMMU groups should not have differing values of |
1933 | * msi_device_has_isolated_msi() for devices in a group. However nothing |
1934 | * directly prevents this, so ensure mistakes don't result in isolation failures |
1935 | * by checking that all the devices are the same. |
1936 | */ |
1937 | bool iommu_group_has_isolated_msi(struct iommu_group *group) |
1938 | { |
1939 | struct group_device *group_dev; |
1940 | bool ret = true; |
1941 | |
1942 | mutex_lock(&group->mutex); |
1943 | for_each_group_device(group, group_dev) |
1944 | ret &= msi_device_has_isolated_msi(dev: group_dev->dev); |
1945 | mutex_unlock(lock: &group->mutex); |
1946 | return ret; |
1947 | } |
1948 | EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); |
1949 | |
1950 | /** |
1951 | * iommu_set_fault_handler() - set a fault handler for an iommu domain |
1952 | * @domain: iommu domain |
1953 | * @handler: fault handler |
1954 | * @token: user data, will be passed back to the fault handler |
1955 | * |
1956 | * This function should be used by IOMMU users which want to be notified |
1957 | * whenever an IOMMU fault happens. |
1958 | * |
1959 | * The fault handler itself should return 0 on success, and an appropriate |
1960 | * error code otherwise. |
1961 | */ |
1962 | void iommu_set_fault_handler(struct iommu_domain *domain, |
1963 | iommu_fault_handler_t handler, |
1964 | void *token) |
1965 | { |
1966 | BUG_ON(!domain); |
1967 | |
1968 | domain->handler = handler; |
1969 | domain->handler_token = token; |
1970 | } |
1971 | EXPORT_SYMBOL_GPL(iommu_set_fault_handler); |
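
/*
 * Illustrative sketch only: a hypothetical IOMMU user could install a handler
 * on an unmanaged domain as below; "my_fault" and "my_ctx" are placeholder
 * names. Returning -ENOSYS leaves the driver's default fault handling in
 * place.
 *
 *	static int my_fault(struct iommu_domain *domain, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		struct my_ctx *ctx = token;
 *
 *		dev_err(dev, "unexpected fault at 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, ctx);
 */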
1972 | |
1973 | static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus, |
1974 | unsigned type) |
1975 | { |
1976 | struct iommu_domain *domain; |
1977 | unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; |
1978 | |
1979 | if (bus == NULL || bus->iommu_ops == NULL) |
1980 | return NULL; |
1981 | |
1982 | domain = bus->iommu_ops->domain_alloc(alloc_type); |
1983 | if (!domain) |
1984 | return NULL; |
1985 | |
1986 | domain->type = type; |
1987 | /* |
1988 | * If not already set, assume all sizes by default; the driver |
1989 | * may override this later |
1990 | */ |
1991 | if (!domain->pgsize_bitmap) |
1992 | domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; |
1993 | |
1994 | if (!domain->ops) |
1995 | domain->ops = bus->iommu_ops->default_domain_ops; |
1996 | |
1997 | if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { |
1998 | iommu_domain_free(domain); |
1999 | domain = NULL; |
2000 | } |
2001 | return domain; |
2002 | } |
2003 | |
2004 | struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) |
2005 | { |
2006 | return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); |
2007 | } |
2008 | EXPORT_SYMBOL_GPL(iommu_domain_alloc); |
2009 | |
2010 | void iommu_domain_free(struct iommu_domain *domain) |
2011 | { |
2012 | if (domain->type == IOMMU_DOMAIN_SVA) |
		mmdrop(domain->mm);
2014 | iommu_put_dma_cookie(domain); |
2015 | domain->ops->free(domain); |
2016 | } |
2017 | EXPORT_SYMBOL_GPL(iommu_domain_free); |
2018 | |
2019 | /* |
2020 | * Put the group's domain back to the appropriate core-owned domain - either the |
2021 | * standard kernel-mode DMA configuration or an all-DMA-blocked domain. |
2022 | */ |
2023 | static void __iommu_group_set_core_domain(struct iommu_group *group) |
2024 | { |
2025 | struct iommu_domain *new_domain; |
2026 | |
2027 | if (group->owner) |
2028 | new_domain = group->blocking_domain; |
2029 | else |
2030 | new_domain = group->default_domain; |
2031 | |
2032 | __iommu_group_set_domain_nofail(group, new_domain); |
2033 | } |
2034 | |
2035 | static int __iommu_attach_device(struct iommu_domain *domain, |
2036 | struct device *dev) |
2037 | { |
2038 | int ret; |
2039 | |
2040 | if (unlikely(domain->ops->attach_dev == NULL)) |
2041 | return -ENODEV; |
2042 | |
2043 | ret = domain->ops->attach_dev(domain, dev); |
2044 | if (ret) |
2045 | return ret; |
2046 | dev->iommu->attach_deferred = 0; |
2047 | trace_attach_device_to_domain(dev); |
2048 | return 0; |
2049 | } |
2050 | |
2051 | /** |
2052 | * iommu_attach_device - Attach an IOMMU domain to a device |
2053 | * @domain: IOMMU domain to attach |
2054 | * @dev: Device that will be attached |
2055 | * |
2056 | * Returns 0 on success and error code on failure |
2057 | * |
2058 | * Note that EINVAL can be treated as a soft failure, indicating |
2059 | * that certain configuration of the domain is incompatible with |
2060 | * the device. In this case attaching a different domain to the |
2061 | * device may succeed. |
2062 | */ |
2063 | int iommu_attach_device(struct iommu_domain *domain, struct device *dev) |
2064 | { |
2065 | struct iommu_group *group; |
2066 | int ret; |
2067 | |
2068 | group = iommu_group_get(dev); |
2069 | if (!group) |
2070 | return -ENODEV; |
2071 | |
2072 | /* |
2073 | * Lock the group to make sure the device-count doesn't |
2074 | * change while we are attaching |
2075 | */ |
2076 | mutex_lock(&group->mutex); |
2077 | ret = -EINVAL; |
	if (list_count_nodes(&group->devices) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
2085 | iommu_group_put(group); |
2086 | |
2087 | return ret; |
2088 | } |
2089 | EXPORT_SYMBOL_GPL(iommu_attach_device); |
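
/*
 * Illustrative sketch only: a device that sits alone in its group can be
 * driven through the unmanaged-domain API roughly as below. Error handling
 * is abbreviated and the IOVA value is a placeholder.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free;
 *	iommu_map(domain, 0x100000, page_to_phys(page), PAGE_SIZE,
 *		  IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, 0x100000, PAGE_SIZE);
 *	iommu_detach_device(domain, dev);
 * err_free:
 *	iommu_domain_free(domain);
 */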
2090 | |
2091 | int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) |
2092 | { |
2093 | if (dev->iommu && dev->iommu->attach_deferred) |
2094 | return __iommu_attach_device(domain, dev); |
2095 | |
2096 | return 0; |
2097 | } |
2098 | |
2099 | void iommu_detach_device(struct iommu_domain *domain, struct device *dev) |
2100 | { |
2101 | struct iommu_group *group; |
2102 | |
2103 | group = iommu_group_get(dev); |
2104 | if (!group) |
2105 | return; |
2106 | |
2107 | mutex_lock(&group->mutex); |
2108 | if (WARN_ON(domain != group->domain) || |
2109 | WARN_ON(list_count_nodes(&group->devices) != 1)) |
2110 | goto out_unlock; |
2111 | __iommu_group_set_core_domain(group); |
2112 | |
2113 | out_unlock: |
	mutex_unlock(&group->mutex);
2115 | iommu_group_put(group); |
2116 | } |
2117 | EXPORT_SYMBOL_GPL(iommu_detach_device); |
2118 | |
2119 | struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) |
2120 | { |
2121 | struct iommu_domain *domain; |
2122 | struct iommu_group *group; |
2123 | |
2124 | group = iommu_group_get(dev); |
2125 | if (!group) |
2126 | return NULL; |
2127 | |
2128 | domain = group->domain; |
2129 | |
2130 | iommu_group_put(group); |
2131 | |
2132 | return domain; |
2133 | } |
2134 | EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); |
2135 | |
2136 | /* |
2137 | * For IOMMU_DOMAIN_DMA implementations which already provide their own |
2138 | * guarantees that the group and its default domain are valid and correct. |
2139 | */ |
2140 | struct iommu_domain *iommu_get_dma_domain(struct device *dev) |
2141 | { |
2142 | return dev->iommu_group->default_domain; |
2143 | } |
2144 | |
2145 | static int __iommu_attach_group(struct iommu_domain *domain, |
2146 | struct iommu_group *group) |
2147 | { |
2148 | if (group->domain && group->domain != group->default_domain && |
2149 | group->domain != group->blocking_domain) |
2150 | return -EBUSY; |
2151 | |
	return __iommu_group_set_domain(group, domain);
2153 | } |
2154 | |
2155 | /** |
2156 | * iommu_attach_group - Attach an IOMMU domain to an IOMMU group |
2157 | * @domain: IOMMU domain to attach |
2158 | * @group: IOMMU group that will be attached |
2159 | * |
2160 | * Returns 0 on success and error code on failure |
2161 | * |
2162 | * Note that EINVAL can be treated as a soft failure, indicating |
2163 | * that certain configuration of the domain is incompatible with |
2164 | * the group. In this case attaching a different domain to the |
2165 | * group may succeed. |
2166 | */ |
2167 | int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) |
2168 | { |
2169 | int ret; |
2170 | |
2171 | mutex_lock(&group->mutex); |
2172 | ret = __iommu_attach_group(domain, group); |
	mutex_unlock(&group->mutex);
2174 | |
2175 | return ret; |
2176 | } |
2177 | EXPORT_SYMBOL_GPL(iommu_attach_group); |
2178 | |
2179 | /** |
2180 | * iommu_group_replace_domain - replace the domain that a group is attached to |
2181 | * @new_domain: new IOMMU domain to replace with |
2182 | * @group: IOMMU group that will be attached to the new domain |
2183 | * |
2184 | * This API allows the group to switch domains without being forced to go to |
2185 | * the blocking domain in-between. |
2186 | * |
2187 | * If the currently attached domain is a core domain (e.g. a default_domain), |
2188 | * it will act just like the iommu_attach_group(). |
2189 | */ |
2190 | int iommu_group_replace_domain(struct iommu_group *group, |
2191 | struct iommu_domain *new_domain) |
2192 | { |
2193 | int ret; |
2194 | |
2195 | if (!new_domain) |
2196 | return -EINVAL; |
2197 | |
2198 | mutex_lock(&group->mutex); |
2199 | ret = __iommu_group_set_domain(group, new_domain); |
	mutex_unlock(&group->mutex);
2201 | return ret; |
2202 | } |
2203 | EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL); |
2204 | |
2205 | static int __iommu_device_set_domain(struct iommu_group *group, |
2206 | struct device *dev, |
2207 | struct iommu_domain *new_domain, |
2208 | unsigned int flags) |
2209 | { |
2210 | int ret; |
2211 | |
2212 | /* |
2213 | * If the device requires IOMMU_RESV_DIRECT then we cannot allow |
2214 | * the blocking domain to be attached as it does not contain the |
2215 | * required 1:1 mapping. This test effectively excludes the device |
2216 | * being used with iommu_group_claim_dma_owner() which will block |
2217 | * vfio and iommufd as well. |
2218 | */ |
2219 | if (dev->iommu->require_direct && |
2220 | (new_domain->type == IOMMU_DOMAIN_BLOCKED || |
2221 | new_domain == group->blocking_domain)) { |
2222 | dev_warn(dev, |
2223 | "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n" ); |
2224 | return -EINVAL; |
2225 | } |
2226 | |
2227 | if (dev->iommu->attach_deferred) { |
2228 | if (new_domain == group->default_domain) |
2229 | return 0; |
2230 | dev->iommu->attach_deferred = 0; |
2231 | } |
2232 | |
	ret = __iommu_attach_device(new_domain, dev);
2234 | if (ret) { |
2235 | /* |
2236 | * If we have a blocking domain then try to attach that in hopes |
2237 | * of avoiding a UAF. Modern drivers should implement blocking |
2238 | * domains as global statics that cannot fail. |
2239 | */ |
2240 | if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) && |
2241 | group->blocking_domain && |
2242 | group->blocking_domain != new_domain) |
			__iommu_attach_device(group->blocking_domain, dev);
2244 | return ret; |
2245 | } |
2246 | return 0; |
2247 | } |
2248 | |
2249 | /* |
 * If 0 is returned the group's domain is now new_domain. If an error is
 * returned then the group's domain will be set back to the existing domain
 * unless IOMMU_SET_DOMAIN_MUST_SUCCEED is set, in which case an error is
 * returned and the group's domain is left inconsistent. It is a driver bug to
 * fail attach with a previously good domain; we try to avoid a kernel UAF
 * because of this.
2255 | * |
2256 | * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU |
2257 | * API works on domains and devices. Bridge that gap by iterating over the |
2258 | * devices in a group. Ideally we'd have a single device which represents the |
2259 | * requestor ID of the group, but we also allow IOMMU drivers to create policy |
 * defined minimum sets, where the physical hardware may be able to distinguish
2261 | * members, but we wish to group them at a higher level (ex. untrusted |
2262 | * multi-function PCI devices). Thus we attach each device. |
2263 | */ |
2264 | static int __iommu_group_set_domain_internal(struct iommu_group *group, |
2265 | struct iommu_domain *new_domain, |
2266 | unsigned int flags) |
2267 | { |
2268 | struct group_device *last_gdev; |
2269 | struct group_device *gdev; |
2270 | int result; |
2271 | int ret; |
2272 | |
2273 | lockdep_assert_held(&group->mutex); |
2274 | |
2275 | if (group->domain == new_domain) |
2276 | return 0; |
2277 | |
2278 | /* |
2279 | * New drivers should support default domains, so set_platform_dma() |
2280 | * op will never be called. Otherwise the NULL domain represents some |
2281 | * platform specific behavior. |
2282 | */ |
2283 | if (!new_domain) { |
2284 | for_each_group_device(group, gdev) { |
			const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
2286 | |
2287 | if (!WARN_ON(!ops->set_platform_dma_ops)) |
2288 | ops->set_platform_dma_ops(gdev->dev); |
2289 | } |
2290 | group->domain = NULL; |
2291 | return 0; |
2292 | } |
2293 | |
2294 | /* |
2295 | * Changing the domain is done by calling attach_dev() on the new |
2296 | * domain. This switch does not have to be atomic and DMA can be |
2297 | * discarded during the transition. DMA must only be able to access |
2298 | * either new_domain or group->domain, never something else. |
2299 | */ |
2300 | result = 0; |
2301 | for_each_group_device(group, gdev) { |
		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
2303 | flags); |
2304 | if (ret) { |
2305 | result = ret; |
2306 | /* |
2307 | * Keep trying the other devices in the group. If a |
2308 | * driver fails attach to an otherwise good domain, and |
2309 | * does not support blocking domains, it should at least |
2310 | * drop its reference on the current domain so we don't |
2311 | * UAF. |
2312 | */ |
2313 | if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) |
2314 | continue; |
2315 | goto err_revert; |
2316 | } |
2317 | } |
2318 | group->domain = new_domain; |
2319 | return result; |
2320 | |
2321 | err_revert: |
2322 | /* |
2323 | * This is called in error unwind paths. A well behaved driver should |
2324 | * always allow us to attach to a domain that was already attached. |
2325 | */ |
2326 | last_gdev = gdev; |
2327 | for_each_group_device(group, gdev) { |
		const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
2329 | |
2330 | /* |
2331 | * If set_platform_dma_ops is not present a NULL domain can |
2332 | * happen only for first probe, in which case we leave |
2333 | * group->domain as NULL and let release clean everything up. |
2334 | */ |
2335 | if (group->domain) |
2336 | WARN_ON(__iommu_device_set_domain( |
2337 | group, gdev->dev, group->domain, |
2338 | IOMMU_SET_DOMAIN_MUST_SUCCEED)); |
2339 | else if (ops->set_platform_dma_ops) |
2340 | ops->set_platform_dma_ops(gdev->dev); |
2341 | if (gdev == last_gdev) |
2342 | break; |
2343 | } |
2344 | return ret; |
2345 | } |
2346 | |
2347 | void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) |
2348 | { |
2349 | mutex_lock(&group->mutex); |
2350 | __iommu_group_set_core_domain(group); |
	mutex_unlock(&group->mutex);
2352 | } |
2353 | EXPORT_SYMBOL_GPL(iommu_detach_group); |
2354 | |
2355 | phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) |
2356 | { |
2357 | if (domain->type == IOMMU_DOMAIN_IDENTITY) |
2358 | return iova; |
2359 | |
2360 | if (domain->type == IOMMU_DOMAIN_BLOCKED) |
2361 | return 0; |
2362 | |
2363 | return domain->ops->iova_to_phys(domain, iova); |
2364 | } |
2365 | EXPORT_SYMBOL_GPL(iommu_iova_to_phys); |
2366 | |
2367 | static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, |
2368 | phys_addr_t paddr, size_t size, size_t *count) |
2369 | { |
2370 | unsigned int pgsize_idx, pgsize_idx_next; |
2371 | unsigned long pgsizes; |
2372 | size_t offset, pgsize, pgsize_next; |
2373 | unsigned long addr_merge = paddr | iova; |
2374 | |
2375 | /* Page sizes supported by the hardware and small enough for @size */ |
2376 | pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); |
2377 | |
2378 | /* Constrain the page sizes further based on the maximum alignment */ |
2379 | if (likely(addr_merge)) |
2380 | pgsizes &= GENMASK(__ffs(addr_merge), 0); |
2381 | |
2382 | /* Make sure we have at least one suitable page size */ |
2383 | BUG_ON(!pgsizes); |
2384 | |
2385 | /* Pick the biggest page size remaining */ |
	pgsize_idx = __fls(pgsizes);
2387 | pgsize = BIT(pgsize_idx); |
2388 | if (!count) |
2389 | return pgsize; |
2390 | |
	/* Find the next biggest supported page size, if it exists */
2392 | pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); |
2393 | if (!pgsizes) |
2394 | goto out_set_count; |
2395 | |
2396 | pgsize_idx_next = __ffs(pgsizes); |
2397 | pgsize_next = BIT(pgsize_idx_next); |
2398 | |
2399 | /* |
2400 | * There's no point trying a bigger page size unless the virtual |
2401 | * and physical addresses are similarly offset within the larger page. |
2402 | */ |
2403 | if ((iova ^ paddr) & (pgsize_next - 1)) |
2404 | goto out_set_count; |
2405 | |
2406 | /* Calculate the offset to the next page size alignment boundary */ |
2407 | offset = pgsize_next - (addr_merge & (pgsize_next - 1)); |
2408 | |
2409 | /* |
2410 | * If size is big enough to accommodate the larger page, reduce |
2411 | * the number of smaller pages. |
2412 | */ |
2413 | if (offset + pgsize_next <= size) |
2414 | size = offset; |
2415 | |
2416 | out_set_count: |
2417 | *count = size >> pgsize_idx; |
2418 | return pgsize; |
2419 | } |
2420 | |
2421 | static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, |
2422 | phys_addr_t paddr, size_t size, int prot, |
2423 | gfp_t gfp, size_t *mapped) |
2424 | { |
2425 | const struct iommu_domain_ops *ops = domain->ops; |
2426 | size_t pgsize, count; |
2427 | int ret; |
2428 | |
	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
		 iova, &paddr, pgsize, count);
2433 | |
2434 | if (ops->map_pages) { |
2435 | ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, |
2436 | gfp, mapped); |
2437 | } else { |
2438 | ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); |
2439 | *mapped = ret ? 0 : pgsize; |
2440 | } |
2441 | |
2442 | return ret; |
2443 | } |
2444 | |
2445 | static int __iommu_map(struct iommu_domain *domain, unsigned long iova, |
2446 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
2447 | { |
2448 | const struct iommu_domain_ops *ops = domain->ops; |
2449 | unsigned long orig_iova = iova; |
2450 | unsigned int min_pagesz; |
2451 | size_t orig_size = size; |
2452 | phys_addr_t orig_paddr = paddr; |
2453 | int ret = 0; |
2454 | |
2455 | if (unlikely(!(ops->map || ops->map_pages) || |
2456 | domain->pgsize_bitmap == 0UL)) |
2457 | return -ENODEV; |
2458 | |
2459 | if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) |
2460 | return -EINVAL; |
2461 | |
2462 | /* find out the minimum page size supported */ |
2463 | min_pagesz = 1 << __ffs(domain->pgsize_bitmap); |
2464 | |
2465 | /* |
2466 | * both the virtual address and the physical one, as well as |
2467 | * the size of the mapping, must be aligned (at least) to the |
2468 | * size of the smallest page supported by the hardware |
2469 | */ |
2470 | if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { |
2471 | pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n" , |
2472 | iova, &paddr, size, min_pagesz); |
2473 | return -EINVAL; |
2474 | } |
2475 | |
2476 | pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n" , iova, &paddr, size); |
2477 | |
2478 | while (size) { |
2479 | size_t mapped = 0; |
2480 | |
2481 | ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, |
2482 | mapped: &mapped); |
2483 | /* |
2484 | * Some pages may have been mapped, even if an error occurred, |
2485 | * so we should account for those so they can be unmapped. |
2486 | */ |
2487 | size -= mapped; |
2488 | |
2489 | if (ret) |
2490 | break; |
2491 | |
2492 | iova += mapped; |
2493 | paddr += mapped; |
2494 | } |
2495 | |
2496 | /* unroll mapping in case something went wrong */ |
2497 | if (ret) |
		iommu_unmap(domain, orig_iova, orig_size - size);
2499 | else |
		trace_map(orig_iova, orig_paddr, orig_size);
2501 | |
2502 | return ret; |
2503 | } |
2504 | |
2505 | int iommu_map(struct iommu_domain *domain, unsigned long iova, |
2506 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
2507 | { |
2508 | const struct iommu_domain_ops *ops = domain->ops; |
2509 | int ret; |
2510 | |
2511 | might_sleep_if(gfpflags_allow_blocking(gfp)); |
2512 | |
2513 | /* Discourage passing strange GFP flags */ |
2514 | if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | |
2515 | __GFP_HIGHMEM))) |
2516 | return -EINVAL; |
2517 | |
2518 | ret = __iommu_map(domain, iova, paddr, size, prot, gfp); |
2519 | if (ret == 0 && ops->iotlb_sync_map) |
2520 | ops->iotlb_sync_map(domain, iova, size); |
2521 | |
2522 | return ret; |
2523 | } |
2524 | EXPORT_SYMBOL_GPL(iommu_map); |
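
/*
 * Illustrative sketch only: mapping a physically contiguous kernel buffer
 * into an unmanaged domain and tearing it down again. "buf", "iova" and
 * "size" are placeholders; both iova and size must be aligned to a page size
 * the domain supports.
 *
 *	phys_addr_t phys = virt_to_phys(buf);
 *	int ret;
 *
 *	ret = iommu_map(domain, iova, phys, size,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_unmap(domain, iova, size);
 */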
2525 | |
2526 | static size_t __iommu_unmap_pages(struct iommu_domain *domain, |
2527 | unsigned long iova, size_t size, |
2528 | struct iommu_iotlb_gather *iotlb_gather) |
2529 | { |
2530 | const struct iommu_domain_ops *ops = domain->ops; |
2531 | size_t pgsize, count; |
2532 | |
	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2534 | return ops->unmap_pages ? |
2535 | ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : |
2536 | ops->unmap(domain, iova, pgsize, iotlb_gather); |
2537 | } |
2538 | |
2539 | static size_t __iommu_unmap(struct iommu_domain *domain, |
2540 | unsigned long iova, size_t size, |
2541 | struct iommu_iotlb_gather *iotlb_gather) |
2542 | { |
2543 | const struct iommu_domain_ops *ops = domain->ops; |
2544 | size_t unmapped_page, unmapped = 0; |
2545 | unsigned long orig_iova = iova; |
2546 | unsigned int min_pagesz; |
2547 | |
2548 | if (unlikely(!(ops->unmap || ops->unmap_pages) || |
2549 | domain->pgsize_bitmap == 0UL)) |
2550 | return 0; |
2551 | |
2552 | if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) |
2553 | return 0; |
2554 | |
2555 | /* find out the minimum page size supported */ |
2556 | min_pagesz = 1 << __ffs(domain->pgsize_bitmap); |
2557 | |
2558 | /* |
2559 | * The virtual address, as well as the size of the mapping, must be |
2560 | * aligned (at least) to the size of the smallest page supported |
2561 | * by the hardware |
2562 | */ |
2563 | if (!IS_ALIGNED(iova | size, min_pagesz)) { |
2564 | pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n" , |
2565 | iova, size, min_pagesz); |
2566 | return 0; |
2567 | } |
2568 | |
2569 | pr_debug("unmap this: iova 0x%lx size 0x%zx\n" , iova, size); |
2570 | |
2571 | /* |
2572 | * Keep iterating until we either unmap 'size' bytes (or more) |
2573 | * or we hit an area that isn't mapped. |
2574 | */ |
2575 | while (unmapped < size) { |
2576 | unmapped_page = __iommu_unmap_pages(domain, iova, |
						    size - unmapped,
2578 | iotlb_gather); |
2579 | if (!unmapped_page) |
2580 | break; |
2581 | |
2582 | pr_debug("unmapped: iova 0x%lx size 0x%zx\n" , |
2583 | iova, unmapped_page); |
2584 | |
2585 | iova += unmapped_page; |
2586 | unmapped += unmapped_page; |
2587 | } |
2588 | |
	trace_unmap(orig_iova, size, unmapped);
2590 | return unmapped; |
2591 | } |
2592 | |
2593 | size_t iommu_unmap(struct iommu_domain *domain, |
2594 | unsigned long iova, size_t size) |
2595 | { |
2596 | struct iommu_iotlb_gather iotlb_gather; |
2597 | size_t ret; |
2598 | |
	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);
2602 | |
2603 | return ret; |
2604 | } |
2605 | EXPORT_SYMBOL_GPL(iommu_unmap); |
2606 | |
2607 | size_t iommu_unmap_fast(struct iommu_domain *domain, |
2608 | unsigned long iova, size_t size, |
2609 | struct iommu_iotlb_gather *iotlb_gather) |
2610 | { |
2611 | return __iommu_unmap(domain, iova, size, iotlb_gather); |
2612 | } |
2613 | EXPORT_SYMBOL_GPL(iommu_unmap_fast); |
2614 | |
2615 | ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, |
2616 | struct scatterlist *sg, unsigned int nents, int prot, |
2617 | gfp_t gfp) |
2618 | { |
2619 | const struct iommu_domain_ops *ops = domain->ops; |
2620 | size_t len = 0, mapped = 0; |
2621 | phys_addr_t start; |
2622 | unsigned int i = 0; |
2623 | int ret; |
2624 | |
2625 | might_sleep_if(gfpflags_allow_blocking(gfp)); |
2626 | |
2627 | /* Discourage passing strange GFP flags */ |
2628 | if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | |
2629 | __GFP_HIGHMEM))) |
2630 | return -EINVAL; |
2631 | |
2632 | while (i <= nents) { |
2633 | phys_addr_t s_phys = sg_phys(sg); |
2634 | |
2635 | if (len && s_phys != start + len) { |
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);
2638 | |
2639 | if (ret) |
2640 | goto out_err; |
2641 | |
2642 | mapped += len; |
2643 | len = 0; |
2644 | } |
2645 | |
2646 | if (sg_dma_is_bus_address(sg)) |
2647 | goto next; |
2648 | |
2649 | if (len) { |
2650 | len += sg->length; |
2651 | } else { |
2652 | len = sg->length; |
2653 | start = s_phys; |
2654 | } |
2655 | |
2656 | next: |
2657 | if (++i < nents) |
2658 | sg = sg_next(sg); |
2659 | } |
2660 | |
2661 | if (ops->iotlb_sync_map) |
2662 | ops->iotlb_sync_map(domain, iova, mapped); |
2663 | return mapped; |
2664 | |
2665 | out_err: |
2666 | /* undo mappings already done */ |
2667 | iommu_unmap(domain, iova, mapped); |
2668 | |
2669 | return ret; |
2670 | } |
2671 | EXPORT_SYMBOL_GPL(iommu_map_sg); |
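
/*
 * Illustrative sketch only: mapping a one-entry scatterlist. A negative
 * return value is an error; otherwise the number of bytes mapped is
 * returned. "buf", "len" and "iova" are placeholders.
 *
 *	struct scatterlist sg;
 *	ssize_t mapped;
 *
 *	sg_init_one(&sg, buf, len);
 *	mapped = iommu_map_sg(domain, iova, &sg, 1,
 *			      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	if (mapped < 0)
 *		return mapped;
 */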
2672 | |
2673 | /** |
2674 | * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework |
2675 | * @domain: the iommu domain where the fault has happened |
2676 | * @dev: the device where the fault has happened |
2677 | * @iova: the faulting address |
2678 | * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) |
2679 | * |
2680 | * This function should be called by the low-level IOMMU implementations |
2681 | * whenever IOMMU faults happen, to allow high-level users, that are |
2682 | * interested in such events, to know about them. |
2683 | * |
2684 | * This event may be useful for several possible use cases: |
2685 | * - mere logging of the event |
2686 | * - dynamic TLB/PTE loading |
2687 | * - if restarting of the faulting device is required |
2688 | * |
2689 | * Returns 0 on success and an appropriate error code otherwise (if dynamic |
2690 | * PTE/TLB loading will one day be supported, implementations will be able |
2691 | * to tell whether it succeeded or not according to this return value). |
2692 | * |
2693 | * Specifically, -ENOSYS is returned if a fault handler isn't installed |
2694 | * (though fault handlers can also return -ENOSYS, in case they want to |
2695 | * elicit the default behavior of the IOMMU drivers). |
2696 | */ |
2697 | int report_iommu_fault(struct iommu_domain *domain, struct device *dev, |
2698 | unsigned long iova, int flags) |
2699 | { |
2700 | int ret = -ENOSYS; |
2701 | |
2702 | /* |
2703 | * if upper layers showed interest and installed a fault handler, |
2704 | * invoke it. |
2705 | */ |
2706 | if (domain->handler) |
2707 | ret = domain->handler(domain, dev, iova, flags, |
2708 | domain->handler_token); |
2709 | |
2710 | trace_io_page_fault(dev, iova, flags); |
2711 | return ret; |
2712 | } |
2713 | EXPORT_SYMBOL_GPL(report_iommu_fault); |
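
/*
 * Illustrative sketch only: a low-level IOMMU driver would typically call
 * report_iommu_fault() from its fault interrupt handler, e.g.:
 *
 *	if (report_iommu_fault(domain, dev, fault_iova, IOMMU_FAULT_WRITE))
 *		dev_err_ratelimited(dev, "unhandled fault at 0x%lx\n",
 *				    fault_iova);
 *
 * where "fault_iova" is a placeholder for the address read from the
 * hardware fault registers.
 */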
2714 | |
2715 | static int __init iommu_init(void) |
2716 | { |
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
2719 | BUG_ON(!iommu_group_kset); |
2720 | |
2721 | iommu_debugfs_setup(); |
2722 | |
2723 | return 0; |
2724 | } |
2725 | core_initcall(iommu_init); |
2726 | |
2727 | int iommu_enable_nesting(struct iommu_domain *domain) |
2728 | { |
2729 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
2730 | return -EINVAL; |
2731 | if (!domain->ops->enable_nesting) |
2732 | return -EINVAL; |
2733 | return domain->ops->enable_nesting(domain); |
2734 | } |
2735 | EXPORT_SYMBOL_GPL(iommu_enable_nesting); |
2736 | |
2737 | int iommu_set_pgtable_quirks(struct iommu_domain *domain, |
2738 | unsigned long quirk) |
2739 | { |
2740 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
2741 | return -EINVAL; |
2742 | if (!domain->ops->set_pgtable_quirks) |
2743 | return -EINVAL; |
2744 | return domain->ops->set_pgtable_quirks(domain, quirk); |
2745 | } |
2746 | EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); |
2747 | |
2748 | /** |
2749 | * iommu_get_resv_regions - get reserved regions |
2750 | * @dev: device for which to get reserved regions |
2751 | * @list: reserved region list for device |
2752 | * |
2753 | * This returns a list of reserved IOVA regions specific to this device. |
2754 | * A domain user should not map IOVA in these ranges. |
2755 | */ |
2756 | void iommu_get_resv_regions(struct device *dev, struct list_head *list) |
2757 | { |
2758 | const struct iommu_ops *ops = dev_iommu_ops(dev); |
2759 | |
2760 | if (ops->get_resv_regions) |
2761 | ops->get_resv_regions(dev, list); |
2762 | } |
2763 | EXPORT_SYMBOL_GPL(iommu_get_resv_regions); |
2764 | |
2765 | /** |
2766 | * iommu_put_resv_regions - release reserved regions |
2767 | * @dev: device for which to free reserved regions |
2768 | * @list: reserved region list for device |
2769 | * |
2770 | * This releases a reserved region list acquired by iommu_get_resv_regions(). |
2771 | */ |
2772 | void iommu_put_resv_regions(struct device *dev, struct list_head *list) |
2773 | { |
2774 | struct iommu_resv_region *entry, *next; |
2775 | |
2776 | list_for_each_entry_safe(entry, next, list, list) { |
2777 | if (entry->free) |
2778 | entry->free(dev, entry); |
2779 | else |
			kfree(entry);
2781 | } |
2782 | } |
2783 | EXPORT_SYMBOL(iommu_put_resv_regions); |
2784 | |
2785 | struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, |
2786 | size_t length, int prot, |
2787 | enum iommu_resv_type type, |
2788 | gfp_t gfp) |
2789 | { |
2790 | struct iommu_resv_region *region; |
2791 | |
	region = kzalloc(sizeof(*region), gfp);
2793 | if (!region) |
2794 | return NULL; |
2795 | |
	INIT_LIST_HEAD(&region->list);
2797 | region->start = start; |
2798 | region->length = length; |
2799 | region->prot = prot; |
2800 | region->type = type; |
2801 | return region; |
2802 | } |
2803 | EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); |
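
/*
 * Illustrative sketch only: an IOMMU driver's ops->get_resv_regions callback
 * might reserve a software MSI window roughly like this, while consumers walk
 * the device's list via iommu_get_resv_regions()/iommu_put_resv_regions().
 * "MY_MSI_BASE", "MY_MSI_SIZE" and "my_get_resv_regions" are placeholders,
 * and the prot flags are only an example choice.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
 *						 IOMMU_READ | IOMMU_WRITE,
 *						 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */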
2804 | |
2805 | void iommu_set_default_passthrough(bool cmd_line) |
2806 | { |
2807 | if (cmd_line) |
2808 | iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; |
2809 | iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; |
2810 | } |
2811 | |
2812 | void iommu_set_default_translated(bool cmd_line) |
2813 | { |
2814 | if (cmd_line) |
2815 | iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; |
2816 | iommu_def_domain_type = IOMMU_DOMAIN_DMA; |
2817 | } |
2818 | |
2819 | bool iommu_default_passthrough(void) |
2820 | { |
2821 | return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; |
2822 | } |
2823 | EXPORT_SYMBOL_GPL(iommu_default_passthrough); |
2824 | |
2825 | const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) |
2826 | { |
2827 | const struct iommu_ops *ops = NULL; |
2828 | struct iommu_device *iommu; |
2829 | |
	spin_lock(&iommu_device_lock);
2831 | list_for_each_entry(iommu, &iommu_device_list, list) |
2832 | if (iommu->fwnode == fwnode) { |
2833 | ops = iommu->ops; |
2834 | break; |
2835 | } |
	spin_unlock(&iommu_device_lock);
2837 | return ops; |
2838 | } |
2839 | |
2840 | int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, |
2841 | const struct iommu_ops *ops) |
2842 | { |
2843 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
2844 | |
2845 | if (fwspec) |
2846 | return ops == fwspec->ops ? 0 : -EINVAL; |
2847 | |
2848 | if (!dev_iommu_get(dev)) |
2849 | return -ENOMEM; |
2850 | |
2851 | /* Preallocate for the overwhelmingly common case of 1 ID */ |
2852 | fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); |
2853 | if (!fwspec) |
2854 | return -ENOMEM; |
2855 | |
2856 | of_node_get(to_of_node(iommu_fwnode)); |
2857 | fwspec->iommu_fwnode = iommu_fwnode; |
2858 | fwspec->ops = ops; |
2859 | dev_iommu_fwspec_set(dev, fwspec); |
2860 | return 0; |
2861 | } |
2862 | EXPORT_SYMBOL_GPL(iommu_fwspec_init); |
2863 | |
2864 | void iommu_fwspec_free(struct device *dev) |
2865 | { |
2866 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
2867 | |
2868 | if (fwspec) { |
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
2871 | dev_iommu_fwspec_set(dev, NULL); |
2872 | } |
2873 | } |
2874 | EXPORT_SYMBOL_GPL(iommu_fwspec_free); |
2875 | |
2876 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) |
2877 | { |
2878 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
2879 | int i, new_num; |
2880 | |
2881 | if (!fwspec) |
2882 | return -EINVAL; |
2883 | |
2884 | new_num = fwspec->num_ids + num_ids; |
2885 | if (new_num > 1) { |
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2887 | GFP_KERNEL); |
2888 | if (!fwspec) |
2889 | return -ENOMEM; |
2890 | |
2891 | dev_iommu_fwspec_set(dev, fwspec); |
2892 | } |
2893 | |
2894 | for (i = 0; i < num_ids; i++) |
2895 | fwspec->ids[fwspec->num_ids + i] = ids[i]; |
2896 | |
2897 | fwspec->num_ids = new_num; |
2898 | return 0; |
2899 | } |
2900 | EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); |
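
/*
 * Illustrative sketch only: firmware glue code (an of_xlate-style path, for
 * example) usually initialises the fwspec once and then appends the IDs
 * parsed from the firmware description. "args" stands in for an
 * of_phandle_args-like structure and is a placeholder.
 *
 *	ret = iommu_fwspec_init(dev, of_fwnode_handle(args->np), ops);
 *	if (ret)
 *		return ret;
 *	ret = iommu_fwspec_add_ids(dev, args->args, args->args_count);
 */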
2901 | |
2902 | /* |
2903 | * Per device IOMMU features. |
2904 | */ |
2905 | int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) |
2906 | { |
2907 | if (dev->iommu && dev->iommu->iommu_dev) { |
2908 | const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; |
2909 | |
2910 | if (ops->dev_enable_feat) |
2911 | return ops->dev_enable_feat(dev, feat); |
2912 | } |
2913 | |
2914 | return -ENODEV; |
2915 | } |
2916 | EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); |
2917 | |
2918 | /* |
2919 | * The device drivers should do the necessary cleanups before calling this. |
2920 | */ |
2921 | int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) |
2922 | { |
2923 | if (dev->iommu && dev->iommu->iommu_dev) { |
2924 | const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; |
2925 | |
2926 | if (ops->dev_disable_feat) |
2927 | return ops->dev_disable_feat(dev, feat); |
2928 | } |
2929 | |
2930 | return -EBUSY; |
2931 | } |
2932 | EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); |
2933 | |
2934 | /** |
2935 | * iommu_setup_default_domain - Set the default_domain for the group |
2936 | * @group: Group to change |
2937 | * @target_type: Domain type to set as the default_domain |
2938 | * |
2939 | * Allocate a default domain and set it as the current domain on the group. If |
2940 | * the group already has a default domain it will be changed to the target_type. |
2941 | * When target_type is 0 the default domain is selected based on driver and |
2942 | * system preferences. |
2943 | */ |
2944 | static int iommu_setup_default_domain(struct iommu_group *group, |
2945 | int target_type) |
2946 | { |
2947 | struct iommu_domain *old_dom = group->default_domain; |
2948 | struct group_device *gdev; |
2949 | struct iommu_domain *dom; |
2950 | bool direct_failed; |
2951 | int req_type; |
2952 | int ret; |
2953 | |
2954 | lockdep_assert_held(&group->mutex); |
2955 | |
2956 | req_type = iommu_get_default_domain_type(group, target_type); |
2957 | if (req_type < 0) |
2958 | return -EINVAL; |
2959 | |
2960 | /* |
2961 | * There are still some drivers which don't support default domains, so |
2962 | * we ignore the failure and leave group->default_domain NULL. |
2963 | * |
2964 | * We assume that the iommu driver starts up the device in |
2965 | * 'set_platform_dma_ops' mode if it does not support default domains. |
2966 | */ |
2967 | dom = iommu_group_alloc_default_domain(group, req_type); |
2968 | if (!dom) { |
2969 | /* Once in default_domain mode we never leave */ |
2970 | if (group->default_domain) |
2971 | return -ENODEV; |
2972 | group->default_domain = NULL; |
2973 | return 0; |
2974 | } |
2975 | |
2976 | if (group->default_domain == dom) |
2977 | return 0; |
2978 | |
2979 | /* |
2980 | * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be |
2981 | * mapped before their device is attached, in order to guarantee |
2982 | * continuity with any FW activity |
2983 | */ |
2984 | direct_failed = false; |
2985 | for_each_group_device(group, gdev) { |
		if (iommu_create_device_direct_mappings(dom, gdev->dev)) {
			direct_failed = true;
			dev_warn_once(
				gdev->dev->iommu->iommu_dev->dev,
				"IOMMU driver was not able to establish FW requested direct mapping.");
2991 | } |
2992 | } |
2993 | |
2994 | /* We must set default_domain early for __iommu_device_set_domain */ |
2995 | group->default_domain = dom; |
2996 | if (!group->domain) { |
2997 | /* |
2998 | * Drivers are not allowed to fail the first domain attach. |
2999 | * The only way to recover from this is to fail attaching the |
3000 | * iommu driver and call ops->release_device. Put the domain |
3001 | * in group->default_domain so it is freed after. |
3002 | */ |
3003 | ret = __iommu_group_set_domain_internal( |
			group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
3005 | if (WARN_ON(ret)) |
3006 | goto out_free_old; |
3007 | } else { |
		ret = __iommu_group_set_domain(group, dom);
3009 | if (ret) |
3010 | goto err_restore_def_domain; |
3011 | } |
3012 | |
3013 | /* |
3014 | * Drivers are supposed to allow mappings to be installed in a domain |
3015 | * before device attachment, but some don't. Hack around this defect by |
3016 | * trying again after attaching. If this happens it means the device |
3017 | * will not continuously have the IOMMU_RESV_DIRECT map. |
3018 | */ |
3019 | if (direct_failed) { |
3020 | for_each_group_device(group, gdev) { |
			ret = iommu_create_device_direct_mappings(dom, gdev->dev);
3022 | if (ret) |
3023 | goto err_restore_domain; |
3024 | } |
3025 | } |
3026 | |
3027 | out_free_old: |
3028 | if (old_dom) |
3029 | iommu_domain_free(old_dom); |
3030 | return ret; |
3031 | |
3032 | err_restore_domain: |
3033 | if (old_dom) |
3034 | __iommu_group_set_domain_internal( |
			group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
3036 | err_restore_def_domain: |
3037 | if (old_dom) { |
3038 | iommu_domain_free(dom); |
3039 | group->default_domain = old_dom; |
3040 | } |
3041 | return ret; |
3042 | } |
3043 | |
3044 | /* |
3045 | * Changing the default domain through sysfs requires the users to unbind the |
3046 | * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ |
3047 | * transition. Return failure if this isn't met. |
3048 | * |
3049 | * We need to consider the race between this and the device release path. |
3050 | * group->mutex is used here to guarantee that the device release path |
3051 | * will not be entered at the same time. |
3052 | */ |
3053 | static ssize_t iommu_group_store_type(struct iommu_group *group, |
3054 | const char *buf, size_t count) |
3055 | { |
3056 | struct group_device *gdev; |
3057 | int ret, req_type; |
3058 | |
3059 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
3060 | return -EACCES; |
3061 | |
3062 | if (WARN_ON(!group) || !group->default_domain) |
3063 | return -EINVAL; |
3064 | |
	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
3073 | else |
3074 | return -EINVAL; |
3075 | |
3076 | mutex_lock(&group->mutex); |
3077 | /* We can bring up a flush queue without tearing down the domain. */ |
3078 | if (req_type == IOMMU_DOMAIN_DMA_FQ && |
3079 | group->default_domain->type == IOMMU_DOMAIN_DMA) { |
		ret = iommu_dma_init_fq(group->default_domain);
3081 | if (ret) |
3082 | goto out_unlock; |
3083 | |
3084 | group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; |
3085 | ret = count; |
3086 | goto out_unlock; |
3087 | } |
3088 | |
3089 | /* Otherwise, ensure that device exists and no driver is bound. */ |
	if (list_empty(&group->devices) || group->owner_cnt) {
3091 | ret = -EPERM; |
3092 | goto out_unlock; |
3093 | } |
3094 | |
	ret = iommu_setup_default_domain(group, req_type);
3096 | if (ret) |
3097 | goto out_unlock; |
3098 | |
3099 | /* |
3100 | * Release the mutex here because ops->probe_finalize() call-back of |
3101 | * some vendor IOMMU drivers calls arm_iommu_attach_device() which |
3102 | * in-turn might call back into IOMMU core code, where it tries to take |
3103 | * group->mutex, resulting in a deadlock. |
3104 | */ |
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	for_each_group_device(group, gdev)
		iommu_group_do_probe_finalize(gdev->dev);
3110 | return count; |
3111 | |
3112 | out_unlock: |
	mutex_unlock(&group->mutex);
3114 | return ret ?: count; |
3115 | } |
3116 | |
3117 | static bool iommu_is_default_domain(struct iommu_group *group) |
3118 | { |
3119 | if (group->domain == group->default_domain) |
3120 | return true; |
3121 | |
3122 | /* |
3123 | * If the default domain was set to identity and it is still an identity |
3124 | * domain then we consider this a pass. This happens because of |
	 * amd_iommu_init_device() replacing the default identity domain with an
3126 | * identity domain that has a different configuration for AMDGPU. |
3127 | */ |
3128 | if (group->default_domain && |
3129 | group->default_domain->type == IOMMU_DOMAIN_IDENTITY && |
3130 | group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) |
3131 | return true; |
3132 | return false; |
3133 | } |
3134 | |
3135 | /** |
3136 | * iommu_device_use_default_domain() - Device driver wants to handle device |
3137 | * DMA through the kernel DMA API. |
3138 | * @dev: The device. |
3139 | * |
3140 | * The device driver about to bind @dev wants to do DMA through the kernel |
3141 | * DMA API. Return 0 if it is allowed, otherwise an error. |
3142 | */ |
3143 | int iommu_device_use_default_domain(struct device *dev) |
3144 | { |
3145 | struct iommu_group *group = iommu_group_get(dev); |
3146 | int ret = 0; |
3147 | |
3148 | if (!group) |
3149 | return 0; |
3150 | |
3151 | mutex_lock(&group->mutex); |
3152 | if (group->owner_cnt) { |
3153 | if (group->owner || !iommu_is_default_domain(group) || |
3154 | !xa_empty(xa: &group->pasid_array)) { |
3155 | ret = -EBUSY; |
3156 | goto unlock_out; |
3157 | } |
3158 | } |
3159 | |
3160 | group->owner_cnt++; |
3161 | |
3162 | unlock_out: |
	mutex_unlock(&group->mutex);
3164 | iommu_group_put(group); |
3165 | |
3166 | return ret; |
3167 | } |
3168 | |
3169 | /** |
3170 | * iommu_device_unuse_default_domain() - Device driver stops handling device |
3171 | * DMA through the kernel DMA API. |
3172 | * @dev: The device. |
3173 | * |
3174 | * The device driver doesn't want to do DMA through kernel DMA API anymore. |
3175 | * It must be called after iommu_device_use_default_domain(). |
3176 | */ |
3177 | void iommu_device_unuse_default_domain(struct device *dev) |
3178 | { |
3179 | struct iommu_group *group = iommu_group_get(dev); |
3180 | |
3181 | if (!group) |
3182 | return; |
3183 | |
3184 | mutex_lock(&group->mutex); |
3185 | if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) |
3186 | group->owner_cnt--; |
3187 | |
	mutex_unlock(&group->mutex);
3189 | iommu_group_put(group); |
3190 | } |
3191 | |
3192 | static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) |
3193 | { |
3194 | struct group_device *dev = |
3195 | list_first_entry(&group->devices, struct group_device, list); |
3196 | |
3197 | if (group->blocking_domain) |
3198 | return 0; |
3199 | |
3200 | group->blocking_domain = |
		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
3202 | if (!group->blocking_domain) { |
3203 | /* |
3204 | * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED |
3205 | * create an empty domain instead. |
3206 | */ |
3207 | group->blocking_domain = __iommu_domain_alloc( |
			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
3209 | if (!group->blocking_domain) |
3210 | return -EINVAL; |
3211 | } |
3212 | return 0; |
3213 | } |
3214 | |
3215 | static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) |
3216 | { |
3217 | int ret; |
3218 | |
3219 | if ((group->domain && group->domain != group->default_domain) || |
3220 | !xa_empty(xa: &group->pasid_array)) |
3221 | return -EBUSY; |
3222 | |
3223 | ret = __iommu_group_alloc_blocking_domain(group); |
3224 | if (ret) |
3225 | return ret; |
	ret = __iommu_group_set_domain(group, group->blocking_domain);
3227 | if (ret) |
3228 | return ret; |
3229 | |
3230 | group->owner = owner; |
3231 | group->owner_cnt++; |
3232 | return 0; |
3233 | } |
3234 | |
3235 | /** |
3236 | * iommu_group_claim_dma_owner() - Set DMA ownership of a group |
3237 | * @group: The group. |
3238 | * @owner: Caller specified pointer. Used for exclusive ownership. |
3239 | * |
3240 | * This is to support backward compatibility for vfio which manages the dma |
3241 | * ownership in iommu_group level. New invocations on this interface should be |
3242 | * prohibited. Only a single owner may exist for a group. |
3243 | */ |
3244 | int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) |
3245 | { |
3246 | int ret = 0; |
3247 | |
3248 | if (WARN_ON(!owner)) |
3249 | return -EINVAL; |
3250 | |
3251 | mutex_lock(&group->mutex); |
3252 | if (group->owner_cnt) { |
3253 | ret = -EPERM; |
3254 | goto unlock_out; |
3255 | } |
3256 | |
3257 | ret = __iommu_take_dma_ownership(group, owner); |
3258 | unlock_out: |
	mutex_unlock(&group->mutex);
3260 | |
3261 | return ret; |
3262 | } |
3263 | EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); |
3264 | |
3265 | /** |
3266 | * iommu_device_claim_dma_owner() - Set DMA ownership of a device |
3267 | * @dev: The device. |
3268 | * @owner: Caller specified pointer. Used for exclusive ownership. |
3269 | * |
3270 | * Claim the DMA ownership of a device. Multiple devices in the same group may |
3271 | * concurrently claim ownership if they present the same owner value. Returns 0 |
3272 | * on success and error code on failure |
3273 | */ |
3274 | int iommu_device_claim_dma_owner(struct device *dev, void *owner) |
3275 | { |
3276 | struct iommu_group *group; |
3277 | int ret = 0; |
3278 | |
3279 | if (WARN_ON(!owner)) |
3280 | return -EINVAL; |
3281 | |
3282 | group = iommu_group_get(dev); |
3283 | if (!group) |
3284 | return -ENODEV; |
3285 | |
3286 | mutex_lock(&group->mutex); |
3287 | if (group->owner_cnt) { |
3288 | if (group->owner != owner) { |
3289 | ret = -EPERM; |
3290 | goto unlock_out; |
3291 | } |
3292 | group->owner_cnt++; |
3293 | goto unlock_out; |
3294 | } |
3295 | |
3296 | ret = __iommu_take_dma_ownership(group, owner); |
3297 | unlock_out: |
	mutex_unlock(&group->mutex);
3299 | iommu_group_put(group); |
3300 | |
3301 | return ret; |
3302 | } |
3303 | EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); |
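
/*
 * Illustrative sketch only: a user-space DMA framework would bracket its use
 * of the device with ownership claim/release, passing a framework-private
 * pointer as the owner token. "my_token" is a placeholder.
 *
 *	ret = iommu_device_claim_dma_owner(dev, my_token);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_device_release_dma_owner(dev);
 */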
3304 | |
3305 | static void __iommu_release_dma_ownership(struct iommu_group *group) |
3306 | { |
3307 | if (WARN_ON(!group->owner_cnt || !group->owner || |
3308 | !xa_empty(&group->pasid_array))) |
3309 | return; |
3310 | |
3311 | group->owner_cnt = 0; |
3312 | group->owner = NULL; |
	__iommu_group_set_domain_nofail(group, group->default_domain);
3314 | } |
3315 | |
3316 | /** |
3317 | * iommu_group_release_dma_owner() - Release DMA ownership of a group |
3318 | * @group: The group |
3319 | * |
3320 | * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). |
3321 | */ |
3322 | void iommu_group_release_dma_owner(struct iommu_group *group) |
3323 | { |
3324 | mutex_lock(&group->mutex); |
3325 | __iommu_release_dma_ownership(group); |
	mutex_unlock(&group->mutex);
3327 | } |
3328 | EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); |
3329 | |
3330 | /** |
3331 | * iommu_device_release_dma_owner() - Release DMA ownership of a device |
3332 | * @dev: The device. |
3333 | * |
3334 | * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). |
3335 | */ |
3336 | void iommu_device_release_dma_owner(struct device *dev) |
3337 | { |
3338 | struct iommu_group *group = iommu_group_get(dev); |
3339 | |
3340 | mutex_lock(&group->mutex); |
3341 | if (group->owner_cnt > 1) |
3342 | group->owner_cnt--; |
3343 | else |
3344 | __iommu_release_dma_ownership(group); |
	mutex_unlock(&group->mutex);
3346 | iommu_group_put(group); |
3347 | } |
3348 | EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); |
3349 | |
3350 | /** |
3351 | * iommu_group_dma_owner_claimed() - Query group dma ownership status |
3352 | * @group: The group. |
3353 | * |
 * This provides a status query on a given group. It is racy and only for
3355 | * non-binding status reporting. |
3356 | */ |
3357 | bool iommu_group_dma_owner_claimed(struct iommu_group *group) |
3358 | { |
3359 | unsigned int user; |
3360 | |
3361 | mutex_lock(&group->mutex); |
3362 | user = group->owner_cnt; |
	mutex_unlock(&group->mutex);
3364 | |
3365 | return user; |
3366 | } |
3367 | EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); |
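
/*
 * Illustrative sketch only: the result is a non-binding hint, since ownership
 * may change immediately after the check.
 *
 *	if (iommu_group_dma_owner_claimed(group))
 *		pr_info("group %d is user-owned\n", iommu_group_id(group));
 */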
3368 | |
3369 | static int __iommu_set_group_pasid(struct iommu_domain *domain, |
3370 | struct iommu_group *group, ioasid_t pasid) |
3371 | { |
3372 | struct group_device *device; |
3373 | int ret = 0; |
3374 | |
3375 | for_each_group_device(group, device) { |
3376 | ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); |
3377 | if (ret) |
3378 | break; |
3379 | } |
3380 | |
3381 | return ret; |
3382 | } |
3383 | |
3384 | static void __iommu_remove_group_pasid(struct iommu_group *group, |
3385 | ioasid_t pasid) |
3386 | { |
3387 | struct group_device *device; |
3388 | const struct iommu_ops *ops; |
3389 | |
3390 | for_each_group_device(group, device) { |
		ops = dev_iommu_ops(device->dev);
3392 | ops->remove_dev_pasid(device->dev, pasid); |
3393 | } |
3394 | } |
3395 | |
3396 | /* |
3397 | * iommu_attach_device_pasid() - Attach a domain to pasid of device |
3398 | * @domain: the iommu domain. |
3399 | * @dev: the attached device. |
3400 | * @pasid: the pasid of the device. |
3401 | * |
3402 | * Return: 0 on success, or an error. |
3403 | */ |
3404 | int iommu_attach_device_pasid(struct iommu_domain *domain, |
3405 | struct device *dev, ioasid_t pasid) |
3406 | { |
3407 | struct iommu_group *group; |
3408 | void *curr; |
3409 | int ret; |
3410 | |
3411 | if (!domain->ops->set_dev_pasid) |
3412 | return -EOPNOTSUPP; |
3413 | |
3414 | group = iommu_group_get(dev); |
3415 | if (!group) |
3416 | return -ENODEV; |
3417 | |
3418 | mutex_lock(&group->mutex); |
	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
3420 | if (curr) { |
		ret = xa_err(curr) ? : -EBUSY;
3422 | goto out_unlock; |
3423 | } |
3424 | |
3425 | ret = __iommu_set_group_pasid(domain, group, pasid); |
3426 | if (ret) { |
3427 | __iommu_remove_group_pasid(group, pasid); |
		xa_erase(&group->pasid_array, pasid);
3429 | } |
3430 | out_unlock: |
	mutex_unlock(&group->mutex);
3432 | iommu_group_put(group); |
3433 | |
3434 | return ret; |
3435 | } |
3436 | EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); |
3437 | |
3438 | /* |
3439 | * iommu_detach_device_pasid() - Detach the domain from pasid of device |
3440 | * @domain: the iommu domain. |
3441 | * @dev: the attached device. |
3442 | * @pasid: the pasid of the device. |
3443 | * |
3444 | * The @domain must have been attached to @pasid of the @dev with |
3445 | * iommu_attach_device_pasid(). |
3446 | */ |
3447 | void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, |
3448 | ioasid_t pasid) |
3449 | { |
3450 | struct iommu_group *group = iommu_group_get(dev); |
3451 | |
3452 | mutex_lock(&group->mutex); |
3453 | __iommu_remove_group_pasid(group, pasid); |
3454 | WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); |
	mutex_unlock(&group->mutex);
3456 | |
3457 | iommu_group_put(group); |
3458 | } |
3459 | EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); |
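
/*
 * Illustrative sketch only: attach a domain to a PASID of a device and tear it
 * down again. "my_domain" and "my_pasid" are hypothetical; the PASID could
 * come from iommu_alloc_global_pasid() further below.
 *
 *	ret = iommu_attach_device_pasid(my_domain, dev, my_pasid);
 *	if (ret)
 *		return ret;	(-EOPNOTSUPP if the driver lacks set_dev_pasid)
 *	...
 *	iommu_detach_device_pasid(my_domain, dev, my_pasid);
 */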
3460 | |
3461 | /* |
3462 | * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev |
3463 | * @dev: the queried device |
3464 | * @pasid: the pasid of the device |
3465 | * @type: matched domain type, 0 for any match |
3466 | * |
3467 | * This is a variant of iommu_get_domain_for_dev(). It returns the existing |
 * domain attached to the pasid of a device. The caller must hold a lock around
 * this function, and around both iommu_attach/detach_device_pasid(), whenever
 * a domain of @type is being manipulated. This API does not internally resolve
 * races with attach/detach.
3472 | * |
3473 | * Return: attached domain on success, NULL otherwise. |
3474 | */ |
3475 | struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, |
3476 | ioasid_t pasid, |
3477 | unsigned int type) |
3478 | { |
3479 | struct iommu_domain *domain; |
3480 | struct iommu_group *group; |
3481 | |
3482 | group = iommu_group_get(dev); |
3483 | if (!group) |
3484 | return NULL; |
3485 | |
3486 | xa_lock(&group->pasid_array); |
	domain = xa_load(&group->pasid_array, pasid);
3488 | if (type && domain && domain->type != type) |
		domain = ERR_PTR(-EBUSY);
3490 | xa_unlock(&group->pasid_array); |
3491 | iommu_group_put(group); |
3492 | |
3493 | return domain; |
3494 | } |
3495 | EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); |
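
/*
 * Illustrative sketch only: look up the domain currently attached to a PASID,
 * e.g. from an I/O page fault path that only cares about SVA domains.
 * Serialization against attach/detach is the caller's responsibility.
 *
 *	domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
 *	if (IS_ERR_OR_NULL(domain))
 *		return -ENODEV;	(nothing attached, or a non-SVA domain)
 */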
3496 | |
3497 | struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, |
3498 | struct mm_struct *mm) |
3499 | { |
3500 | const struct iommu_ops *ops = dev_iommu_ops(dev); |
3501 | struct iommu_domain *domain; |
3502 | |
3503 | domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); |
3504 | if (!domain) |
3505 | return NULL; |
3506 | |
3507 | domain->type = IOMMU_DOMAIN_SVA; |
3508 | mmgrab(mm); |
3509 | domain->mm = mm; |
3510 | domain->iopf_handler = iommu_sva_handle_iopf; |
3511 | domain->fault_data = mm; |
3512 | |
3513 | return domain; |
3514 | } |
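
/*
 * Illustrative sketch only of the typical SVA flow (the real binding lives in
 * iommu-sva.c): wrap the mm in an SVA domain, then attach it to a PASID of the
 * device. "pasid" is assumed to have been allocated elsewhere.
 *
 *	domain = iommu_sva_domain_alloc(dev, current->mm);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 */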
3515 | |
3516 | ioasid_t iommu_alloc_global_pasid(struct device *dev) |
3517 | { |
3518 | int ret; |
3519 | |
3520 | /* max_pasids == 0 means that the device does not support PASID */ |
3521 | if (!dev->iommu->max_pasids) |
3522 | return IOMMU_PASID_INVALID; |
3523 | |
	/*
	 * max_pasids is set up by the vendor driver based on the number of
	 * PASID bits supported, but the IDA range is inclusive, hence the
	 * max_pasids - 1 upper limit.
	 */
3528 | ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, |
			      dev->iommu->max_pasids - 1, GFP_KERNEL);
3530 | return ret < 0 ? IOMMU_PASID_INVALID : ret; |
3531 | } |
3532 | EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); |
3533 | |
3534 | void iommu_free_global_pasid(ioasid_t pasid) |
3535 | { |
3536 | if (WARN_ON(pasid == IOMMU_PASID_INVALID)) |
3537 | return; |
3538 | |
	ida_free(&iommu_global_pasid_ida, pasid);
3540 | } |
3541 | EXPORT_SYMBOL_GPL(iommu_free_global_pasid); |
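
/*
 * Illustrative sketch only: allocate a globally unique PASID bounded by the
 * device's max_pasids and give it back when done. IOMMU_PASID_INVALID is the
 * only failure indication.
 *
 *	pasid = iommu_alloc_global_pasid(dev);
 *	if (pasid == IOMMU_PASID_INVALID)
 *		return -ENOSPC;
 *	...
 *	iommu_free_global_pasid(pasid);
 */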
3542 | |