// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

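/*
 * An address space: "pd" is the top-level page directory page, "pts" holds
 * the page table pages installed below it and "count" tracks how many PTEs
 * of each page table are in use so that empty tables can be freed again.
 * "id" is the ASID assigned to the address space while it is in use.
 */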
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)

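/*
 * The SMMU uses two-level page tables: bits [31:22] of an IOVA select one
 * of 1024 page directory entries (each covering a 4 MiB section) and bits
 * [21:12] select one of 1024 page table entries (4 KiB pages).
 */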
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

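/*
 * Flush the page table cache line containing the entry at "dma" + "offset".
 * The address is aligned down to the memory controller's atom size; on SoCs
 * with more than 32 address bits the high bits are written to
 * SMMU_PTC_FLUSH_HI before the low word triggers the flush.
 */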
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

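/*
 * Reading back an arbitrary SMMU register ensures that all previously
 * posted register writes have reached the hardware.
 */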
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}

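/*
 * Allocate an address space: a zeroed page directory page plus the arrays
 * tracking per-PDE page tables and their use counts. The page directory is
 * allocated with __GFP_DMA so that its bus address fits within the range
 * the hardware can reference (see smmu_dma_addr_valid()).
 */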
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value |= BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value &= ~BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

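/*
 * The first user of an address space maps the page directory for DMA,
 * allocates an ASID and programs the page table base for that ASID;
 * subsequent users merely increment the use count.
 */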
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}

static void tegra_smmu_set_platform_dma(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned int index;

	if (!fwspec)
		return;

	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

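/*
 * Look up the PTE for "iova", installing the caller-supplied page as a new
 * page table (and pointing the corresponding PDE at it) if none is present
 * yet. The page is freed if it cannot be mapped for DMA.
 */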
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate page in a sleeping context if GFP flags permit. Hence
	 * spinlock needs to be unlocked and re-locked after allocation.
	 */
	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (gfpflags_allow_blocking(gfp))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In a case of blocking allocation, a concurrent mapping may win
	 * the PDE allocation. In this case the allocated page isn't needed
	 * if allocation succeeded and the allocation failure isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

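/*
 * The __tegra_smmu_map()/__tegra_smmu_unmap() helpers below run with
 * as->lock held; note that as_get_pde_page() may temporarily drop the
 * lock around a blocking page table allocation.
 */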
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

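/*
 * Resolve an "iommus" phandle to an SMMU instance: the phandle points at
 * the memory controller, whose driver data carries the tegra_smmu.
 */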
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return NULL;
	}

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);

			if (err < 0) {
				of_node_put(args.np);
				return ERR_PTR(err);
			}
		}

		of_node_put(args.np);
		index++;
	}

	smmu = dev_iommu_priv_get(dev);
	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find group_soc associating with swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find existing iommu_group associating with swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	if (dev_is_pci(dev))
		group->group = pci_device_group(dev);
	else
		group->group = generic_device_group(dev);

	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: we are here releasing the reference of &iommu_pdev->dev, which
	 * is mc->dev. Although some functions in tegra_smmu_ops may keep using
	 * its private data beyond this point, it's still safe to do so because
	 * the SMMU parent device is the same as the MC, so the reference count
	 * isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.domain_alloc = tegra_smmu_domain_alloc,
	.probe_device = tegra_smmu_probe_device,
	.device_group = tegra_smmu_device_group,
	.set_platform_dma_ops = tegra_smmu_set_platform_dma,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = tegra_smmu_attach_dev,
		.map = tegra_smmu_map,
		.unmap = tegra_smmu_unmap,
		.iova_to_phys = tegra_smmu_iova_to_phys,
		.free = tegra_smmu_domain_free,
	}
};

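/*
 * On Tegra30 the AHB bus needs to be notified that the SMMU is ready
 * before transactions from AHB masters are translated.
 */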
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->regs.smmu.reg);

		if (value & BIT(client->regs.smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However iommu_device_register() will attempt to add
	 * all devices to the IOMMU before we get that far. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}