// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

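/*
 * Create the kmem caches used for domain and per-device bookkeeping
 * structures. Called once from pamu_domain_init().
 */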
static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

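/*
 * Update the stash destination for a single LIODN under the global
 * PAMU lock. Returns 0 on success or the error from
 * pamu_update_paace_stash().
 */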
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at geometry setup time.
	 * This is a static value which depends on the type of
	 * device and does not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
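	/*
	 * The PAACE is programmed in two steps: first with the operation
	 * mapping index and stash id but no access permissions, then a
	 * second pass enables PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE.
	 */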
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);
	return ret;
}

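/*
 * Disable the LIODN for a device and free its tracking structure.
 * Caller must hold dma_domain->domain_lock.
 */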
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

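/*
 * Remove @dev from the domain's device list, or all devices if @dev is
 * NULL (used when freeing the domain).
 */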
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

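/*
 * Record that @dev (with the given LIODN) is attached to @dma_domain.
 * If the device is currently attached to a different domain it is
 * detached from that domain first.
 */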
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check if the device is already attached to a domain.
	 * If it is attached to a different domain, detach it first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * For devices with multiple LIODNs just store the info for
	 * the first LIODN, as all LIODNs share the same domain.
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

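/*
 * PAMU translations are identity-mapped in this driver, so an IOVA
 * inside the aperture translates to the same physical address.
 */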
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;
	return iova;
}

static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	/*
	 * FIXME: This isn't creating an unmanaged domain: since the
	 * default_domain_ops have no map/unmap functions, it doesn't meet
	 * the requirements for __IOMMU_DOMAIN_PAGING. Its only purpose seems
	 * to be to allow drivers/soc/fsl/qbman/qman_portal.c to call
	 * fsl_pamu_configure_l1_stash().
	 */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry 64 GB, i.e. the maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

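/*
 * Attach all of the device's LIODNs to the domain: program each PAACE
 * with the domain's stash id and enable the LIODN.
 */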
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to the pci controller device
		 * so we can get the LIODN programmed by u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -ENODEV;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that the LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -ENODEV;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}

/*
 * FIXME: fsl/pamu is completely broken in terms of how it works with the
 * iommu API. Immediately after probe the HW is left in an IDENTITY
 * translation, and the driver provides a non-working UNMANAGED domain that
 * it can switch over to. However, it cannot switch back to an IDENTITY
 * translation; instead it switches to what looks like BLOCKING.
 */
static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
				    struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct fsl_dma_domain *dma_domain;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Hack to keep things working as they always have, only leaving an
	 * UNMANAGED domain makes it BLOCKING.
	 */
	if (domain == platform_domain || !domain ||
	    domain->type != IOMMU_DOMAIN_UNMANAGED)
		return 0;

	dma_domain = to_fsl_dma_domain(domain);

	/*
	 * Use the LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to the pci controller device
		 * so we can get the LIODN programmed by u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
	return 0;
}

static struct iommu_domain_ops fsl_pamu_platform_ops = {
	.attach_dev = fsl_pamu_platform_attach,
};

static struct iommu_domain fsl_pamu_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &fsl_pamu_platform_ops,
};

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
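
/*
 * A minimal caller sketch (hypothetical, modeled on the qman_portal use
 * mentioned in fsl_pamu_domain_alloc() above): allocate an unmanaged
 * domain, attach the device, then direct stashing at the CPU that will
 * consume the data.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
 *
 *	if (dom && !iommu_attach_device(dom, dev))
 *		fsl_pamu_configure_l1_stash(dom, cpu);
 */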

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

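/*
 * Decide which iommu_group a device belongs to: platform devices get
 * their own group, partitionable PCIe endpoints use the standard PCI
 * grouping, and everything else shares the PCI controller's group.
 */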
static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group;
	struct pci_dev *pdev;

	/*
	 * For platform devices we allocate a separate group for each of the
	 * devices.
	 */
	if (!dev_is_pci(dev))
		return generic_device_group(dev);

	/*
	 * We can partition PCIe devices so assign device group to the device
	 */
	pdev = to_pci_dev(dev);
	if (check_pci_ctl_endpt_part(pci_bus_to_host(pdev->bus)))
		return pci_device_group(&pdev->dev);

	/*
	 * All devices connected to the controller will share the same device
	 * group.
	 *
	 * Due to ordering between fsl_pamu_init() and fsl_pci_init() it is
	 * guaranteed that the pci_ctl->parent platform_device will have the
	 * iommu driver bound and will already have a group set. So we just
	 * re-use this group as the group for every device in the hose.
	 */
	group = iommu_group_get(pci_bus_to_host(pdev->bus)->parent);
	if (WARN_ON(!group))
		return ERR_PTR(-EINVAL);
	return group;
}

static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	int len;

	/*
	 * u-boot must fill in the fsl,liodn property for platform devices
	 * to be supported by the iommu.
	 */
	if (!dev_is_pci(dev) &&
	    !of_get_property(dev->of_node, "fsl,liodn", &len))
		return ERR_PTR(-ENODEV);

	return &pamu_iommu;
}

static const struct iommu_ops fsl_pamu_ops = {
	.default_domain = &fsl_pamu_platform_domain,
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
	.probe_device = fsl_pamu_probe_device,
	.device_group = fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = fsl_pamu_attach_device,
		.iova_to_phys = fsl_pamu_iova_to_phys,
		.free = fsl_pamu_domain_free,
	}
};

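/*
 * Register the PAMU with the IOMMU core: set up the kmem caches, add
 * the sysfs entry and register fsl_pamu_ops.
 */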
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
	}

	return ret;
}