1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * omap iommu: tlb and pagetable primitives |
4 | * |
5 | * Copyright (C) 2008-2010 Nokia Corporation |
6 | * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/ |
7 | * |
8 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, |
9 | * Paul Mundt and Toshihiro Kobayashi |
10 | */ |
11 | |
12 | #include <linux/dma-mapping.h> |
13 | #include <linux/err.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/interrupt.h> |
16 | #include <linux/ioport.h> |
17 | #include <linux/platform_device.h> |
18 | #include <linux/iommu.h> |
19 | #include <linux/omap-iommu.h> |
20 | #include <linux/mutex.h> |
21 | #include <linux/spinlock.h> |
22 | #include <linux/io.h> |
23 | #include <linux/pm_runtime.h> |
24 | #include <linux/of.h> |
25 | #include <linux/of_irq.h> |
26 | #include <linux/of_platform.h> |
27 | #include <linux/regmap.h> |
28 | #include <linux/mfd/syscon.h> |
29 | |
30 | #include <linux/platform_data/iommu-omap.h> |
31 | |
32 | #include "omap-iopgtable.h" |
33 | #include "omap-iommu.h" |
34 | |
35 | static const struct iommu_ops omap_iommu_ops; |
36 | |
37 | #define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev)) |
38 | |
39 | /* bitmap of the page sizes currently supported */ |
40 | #define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) |
41 | |
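/*
 * MMU_LOCK register layout: the BASE field (bits 14:10) gives the number
 * of locked (preserved) TLB entries, while the VICT field (bits 8:4)
 * selects the victim entry used for the next TLB load.
 */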
42 | #define MMU_LOCK_BASE_SHIFT 10 |
43 | #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) |
44 | #define MMU_LOCK_BASE(x) \ |
45 | ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT) |
46 | |
47 | #define MMU_LOCK_VICT_SHIFT 4 |
48 | #define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT) |
49 | #define MMU_LOCK_VICT(x) \ |
50 | ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT) |
51 | |
52 | static struct platform_driver omap_iommu_driver; |
53 | static struct kmem_cache *iopte_cachep; |
54 | |
55 | /** |
56 | * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain |
57 | * @dom: generic iommu domain handle |
58 | **/ |
59 | static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom) |
60 | { |
61 | return container_of(dom, struct omap_iommu_domain, domain); |
62 | } |
63 | |
64 | /** |
65 | * omap_iommu_save_ctx - Save registers for pm off-mode support |
66 | * @dev: client device |
67 | * |
 * This should be treated as a deprecated API. It is preserved only
69 | * to maintain existing functionality for OMAP3 ISP driver. |
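 *
 * Usage sketch (the "isp" device pointer below is purely illustrative):
 *	omap_iommu_save_ctx(isp->dev);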
70 | **/ |
71 | void omap_iommu_save_ctx(struct device *dev) |
72 | { |
73 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
74 | struct omap_iommu *obj; |
75 | u32 *p; |
76 | int i; |
77 | |
78 | if (!arch_data) |
79 | return; |
80 | |
81 | while (arch_data->iommu_dev) { |
82 | obj = arch_data->iommu_dev; |
83 | p = obj->ctx; |
84 | for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { |
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
87 | p[i]); |
88 | } |
89 | arch_data++; |
90 | } |
91 | } |
92 | EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); |
93 | |
94 | /** |
95 | * omap_iommu_restore_ctx - Restore registers for pm off-mode support |
96 | * @dev: client device |
97 | * |
 * This should be treated as a deprecated API. It is preserved only
99 | * to maintain existing functionality for OMAP3 ISP driver. |
100 | **/ |
101 | void omap_iommu_restore_ctx(struct device *dev) |
102 | { |
103 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
104 | struct omap_iommu *obj; |
105 | u32 *p; |
106 | int i; |
107 | |
108 | if (!arch_data) |
109 | return; |
110 | |
111 | while (arch_data->iommu_dev) { |
112 | obj = arch_data->iommu_dev; |
113 | p = obj->ctx; |
114 | for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { |
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
117 | p[i]); |
118 | } |
119 | arch_data++; |
120 | } |
121 | } |
122 | EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); |
123 | |
124 | static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable) |
125 | { |
126 | u32 val, mask; |
127 | |
128 | if (!obj->syscfg) |
129 | return; |
130 | |
131 | mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT)); |
132 | val = enable ? mask : 0; |
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
134 | } |
135 | |
136 | static void __iommu_set_twl(struct omap_iommu *obj, bool on) |
137 | { |
138 | u32 l = iommu_read_reg(obj, MMU_CNTL); |
139 | |
140 | if (on) |
141 | iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); |
142 | else |
143 | iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); |
144 | |
145 | l &= ~MMU_CNTL_MASK; |
146 | if (on) |
147 | l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); |
148 | else |
149 | l |= (MMU_CNTL_MMU_EN); |
150 | |
	iommu_write_reg(obj, l, MMU_CNTL);
152 | } |
153 | |
154 | static int omap2_iommu_enable(struct omap_iommu *obj) |
155 | { |
156 | u32 l, pa; |
157 | |
158 | if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K)) |
159 | return -EINVAL; |
160 | |
	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);
177 | |
178 | return 0; |
179 | } |
180 | |
181 | static void omap2_iommu_disable(struct omap_iommu *obj) |
182 | { |
183 | u32 l = iommu_read_reg(obj, MMU_CNTL); |
184 | |
185 | l &= ~MMU_CNTL_MASK; |
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
190 | } |
191 | |
192 | static int iommu_enable(struct omap_iommu *obj) |
193 | { |
194 | int ret; |
195 | |
	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);
199 | |
200 | return ret < 0 ? ret : 0; |
201 | } |
202 | |
203 | static void iommu_disable(struct omap_iommu *obj) |
204 | { |
	pm_runtime_put_sync(obj->dev);
206 | } |
207 | |
208 | /* |
209 | * TLB operations |
210 | */ |
211 | static u32 iotlb_cr_to_virt(struct cr_regs *cr) |
212 | { |
213 | u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; |
214 | u32 mask = get_cam_va_mask(cr->cam & page_size); |
215 | |
216 | return cr->cam & mask; |
217 | } |
218 | |
219 | static u32 get_iopte_attr(struct iotlb_entry *e) |
220 | { |
221 | u32 attr; |
222 | |
223 | attr = e->mixed << 5; |
224 | attr |= e->endian; |
225 | attr |= e->elsz >> 3; |
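	/*
	 * section/supersection (L1) descriptors carry these attribute
	 * bits six positions higher than small/large page (L2) ones
	 */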
226 | attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || |
227 | (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); |
228 | return attr; |
229 | } |
230 | |
231 | static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) |
232 | { |
233 | u32 status, fault_addr; |
234 | |
235 | status = iommu_read_reg(obj, MMU_IRQSTATUS); |
236 | status &= MMU_IRQ_MASK; |
237 | if (!status) { |
238 | *da = 0; |
239 | return 0; |
240 | } |
241 | |
242 | fault_addr = iommu_read_reg(obj, MMU_FAULT_AD); |
243 | *da = fault_addr; |
244 | |
	iommu_write_reg(obj, status, MMU_IRQSTATUS);
246 | |
247 | return status; |
248 | } |
249 | |
250 | void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) |
251 | { |
252 | u32 val; |
253 | |
254 | val = iommu_read_reg(obj, MMU_LOCK); |
255 | |
256 | l->base = MMU_LOCK_BASE(val); |
257 | l->vict = MMU_LOCK_VICT(val); |
258 | } |
259 | |
260 | void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) |
261 | { |
262 | u32 val; |
263 | |
264 | val = (l->base << MMU_LOCK_BASE_SHIFT); |
265 | val |= (l->vict << MMU_LOCK_VICT_SHIFT); |
266 | |
267 | iommu_write_reg(obj, val, MMU_LOCK); |
268 | } |
269 | |
270 | static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) |
271 | { |
272 | cr->cam = iommu_read_reg(obj, MMU_READ_CAM); |
273 | cr->ram = iommu_read_reg(obj, MMU_READ_RAM); |
274 | } |
275 | |
276 | static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) |
277 | { |
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
283 | } |
284 | |
285 | /* only used in iotlb iteration for-loop */ |
286 | struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) |
287 | { |
288 | struct cr_regs cr; |
289 | struct iotlb_lock l; |
290 | |
	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);
295 | |
296 | return cr; |
297 | } |
298 | |
299 | #ifdef PREFETCH_IOTLB |
300 | static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, |
301 | struct iotlb_entry *e) |
302 | { |
303 | struct cr_regs *cr; |
304 | |
305 | if (!e) |
306 | return NULL; |
307 | |
308 | if (e->da & ~(get_cam_va_mask(e->pgsz))) { |
309 | dev_err(obj->dev, "%s:\twrong alignment: %08x\n" , __func__, |
310 | e->da); |
311 | return ERR_PTR(-EINVAL); |
312 | } |
313 | |
314 | cr = kmalloc(sizeof(*cr), GFP_KERNEL); |
315 | if (!cr) |
316 | return ERR_PTR(-ENOMEM); |
317 | |
318 | cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; |
319 | cr->ram = e->pa | e->endian | e->elsz | e->mixed; |
320 | |
321 | return cr; |
322 | } |
323 | |
324 | /** |
325 | * load_iotlb_entry - Set an iommu tlb entry |
326 | * @obj: target iommu |
327 | * @e: an iommu tlb entry info |
328 | **/ |
329 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
330 | { |
331 | int err = 0; |
332 | struct iotlb_lock l; |
333 | struct cr_regs *cr; |
334 | |
335 | if (!obj || !obj->nr_tlb_entries || !e) |
336 | return -EINVAL; |
337 | |
338 | pm_runtime_get_sync(obj->dev); |
339 | |
340 | iotlb_lock_get(obj, &l); |
341 | if (l.base == obj->nr_tlb_entries) { |
342 | dev_warn(obj->dev, "%s: preserve entries full\n" , __func__); |
343 | err = -EBUSY; |
344 | goto out; |
345 | } |
346 | if (!e->prsvd) { |
347 | int i; |
348 | struct cr_regs tmp; |
349 | |
350 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) |
351 | if (!iotlb_cr_valid(&tmp)) |
352 | break; |
353 | |
354 | if (i == obj->nr_tlb_entries) { |
355 | dev_dbg(obj->dev, "%s: full: no entry\n" , __func__); |
356 | err = -EBUSY; |
357 | goto out; |
358 | } |
359 | |
360 | iotlb_lock_get(obj, &l); |
361 | } else { |
362 | l.vict = l.base; |
363 | iotlb_lock_set(obj, &l); |
364 | } |
365 | |
366 | cr = iotlb_alloc_cr(obj, e); |
367 | if (IS_ERR(cr)) { |
368 | pm_runtime_put_sync(obj->dev); |
369 | return PTR_ERR(cr); |
370 | } |
371 | |
372 | iotlb_load_cr(obj, cr); |
373 | kfree(cr); |
374 | |
375 | if (e->prsvd) |
376 | l.base++; |
377 | /* increment victim for next tlb load */ |
378 | if (++l.vict == obj->nr_tlb_entries) |
379 | l.vict = l.base; |
380 | iotlb_lock_set(obj, &l); |
381 | out: |
382 | pm_runtime_put_sync(obj->dev); |
383 | return err; |
384 | } |
385 | |
386 | #else /* !PREFETCH_IOTLB */ |
387 | |
388 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
389 | { |
390 | return 0; |
391 | } |
392 | |
393 | #endif /* !PREFETCH_IOTLB */ |
394 | |
395 | static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
396 | { |
397 | return load_iotlb_entry(obj, e); |
398 | } |
399 | |
400 | /** |
401 | * flush_iotlb_page - Clear an iommu tlb entry |
402 | * @obj: target iommu |
403 | * @da: iommu device virtual address |
404 | * |
405 | * Clear an iommu tlb entry which includes 'da' address. |
406 | **/ |
407 | static void flush_iotlb_page(struct omap_iommu *obj, u32 da) |
408 | { |
409 | int i; |
410 | struct cr_regs cr; |
411 | |
	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
436 | } |
437 | |
438 | /** |
439 | * flush_iotlb_all - Clear all iommu tlb entries |
440 | * @obj: target iommu |
441 | **/ |
442 | static void flush_iotlb_all(struct omap_iommu *obj) |
443 | { |
444 | struct iotlb_lock l; |
445 | |
	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
455 | } |
456 | |
457 | /* |
458 | * H/W pagetable operations |
459 | */ |
460 | static void flush_iopte_range(struct device *dev, dma_addr_t dma, |
461 | unsigned long offset, int num_entries) |
462 | { |
463 | size_t size = num_entries * sizeof(u32); |
464 | |
	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
466 | } |
467 | |
468 | static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid) |
469 | { |
470 | dma_addr_t pt_dma; |
471 | |
472 | /* Note: freed iopte's must be clean ready for re-use */ |
473 | if (iopte) { |
474 | if (dma_valid) { |
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
481 | } |
482 | } |
483 | |
484 | static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, |
485 | dma_addr_t *pt_dma, u32 da) |
486 | { |
487 | u32 *iopte; |
488 | unsigned long offset = iopgd_index(da) * sizeof(da); |
489 | |
	/* an L2 table already exists */
491 | if (*iopgd) |
492 | goto pte_ready; |
493 | |
494 | /* |
495 | * do the allocation outside the page table lock |
496 | */ |
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}
533 | |
534 | pte_ready: |
535 | iopte = iopte_offset(iopgd, da); |
536 | *pt_dma = iopgd_page_paddr(iopgd); |
537 | dev_vdbg(obj->dev, |
538 | "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n" , |
539 | __func__, da, iopgd, *iopgd, iopte, *iopte); |
540 | |
541 | return iopte; |
542 | } |
543 | |
544 | static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
545 | { |
546 | u32 *iopgd = iopgd_offset(obj, da); |
547 | unsigned long offset = iopgd_index(da) * sizeof(da); |
548 | |
549 | if ((da | pa) & ~IOSECTION_MASK) { |
550 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n" , |
551 | __func__, da, pa, IOSECTION_SIZE); |
552 | return -EINVAL; |
553 | } |
554 | |
555 | *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; |
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
557 | return 0; |
558 | } |
559 | |
560 | static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
561 | { |
562 | u32 *iopgd = iopgd_offset(obj, da); |
563 | unsigned long offset = iopgd_index(da) * sizeof(da); |
564 | int i; |
565 | |
566 | if ((da | pa) & ~IOSUPER_MASK) { |
567 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n" , |
568 | __func__, da, pa, IOSUPER_SIZE); |
569 | return -EINVAL; |
570 | } |
571 | |
572 | for (i = 0; i < 16; i++) |
573 | *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; |
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
575 | return 0; |
576 | } |
577 | |
578 | static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
579 | { |
580 | u32 *iopgd = iopgd_offset(obj, da); |
581 | dma_addr_t pt_dma; |
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
592 | __func__, da, pa, iopte, *iopte); |
593 | |
594 | return 0; |
595 | } |
596 | |
597 | static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
598 | { |
599 | u32 *iopgd = iopgd_offset(obj, da); |
600 | dma_addr_t pt_dma; |
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
617 | return 0; |
618 | } |
619 | |
620 | static int |
621 | iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) |
622 | { |
623 | int (*fn)(struct omap_iommu *, u32, u32, u32); |
624 | u32 prot; |
625 | int err; |
626 | |
627 | if (!obj || !e) |
628 | return -EINVAL; |
629 | |
630 | switch (e->pgsz) { |
631 | case MMU_CAM_PGSZ_16M: |
632 | fn = iopgd_alloc_super; |
633 | break; |
634 | case MMU_CAM_PGSZ_1M: |
635 | fn = iopgd_alloc_section; |
636 | break; |
637 | case MMU_CAM_PGSZ_64K: |
638 | fn = iopte_alloc_large; |
639 | break; |
640 | case MMU_CAM_PGSZ_4K: |
641 | fn = iopte_alloc_page; |
642 | break; |
643 | default: |
644 | fn = NULL; |
645 | break; |
646 | } |
647 | |
648 | if (WARN_ON(!fn)) |
649 | return -EINVAL; |
650 | |
651 | prot = get_iopte_attr(e); |
652 | |
	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);
656 | |
657 | return err; |
658 | } |
659 | |
660 | /** |
661 | * omap_iopgtable_store_entry - Make an iommu pte entry |
662 | * @obj: target iommu |
663 | * @e: an iommu tlb entry info |
664 | **/ |
665 | static int |
666 | omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
667 | { |
668 | int err; |
669 | |
	flush_iotlb_page(obj, e->da);
671 | err = iopgtable_store_entry_core(obj, e); |
672 | if (!err) |
673 | prefetch_iotlb_entry(obj, e); |
674 | return err; |
675 | } |
676 | |
677 | /** |
678 | * iopgtable_lookup_entry - Lookup an iommu pte entry |
679 | * @obj: target iommu |
680 | * @da: iommu device virtual address |
681 | * @ppgd: iommu pgd entry pointer to be returned |
682 | * @ppte: iommu pte entry pointer to be returned |
683 | **/ |
684 | static void |
685 | iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte) |
686 | { |
687 | u32 *iopgd, *iopte = NULL; |
688 | |
689 | iopgd = iopgd_offset(obj, da); |
690 | if (!*iopgd) |
691 | goto out; |
692 | |
693 | if (iopgd_is_table(*iopgd)) |
694 | iopte = iopte_offset(iopgd, da); |
695 | out: |
696 | *ppgd = iopgd; |
697 | *ppte = iopte; |
698 | } |
699 | |
700 | static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) |
701 | { |
702 | size_t bytes; |
703 | u32 *iopgd = iopgd_offset(obj, da); |
704 | int nent = 1; |
705 | dma_addr_t pt_dma; |
706 | unsigned long pd_offset = iopgd_index(da) * sizeof(da); |
707 | unsigned long pt_offset = iopte_index(da) * sizeof(da); |
708 | |
709 | if (!*iopgd) |
710 | return 0; |
711 | |
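	/* L2 case: clear the PTE(s) first, then free the table if now empty */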
712 | if (iopgd_is_table(*iopgd)) { |
713 | int i; |
714 | u32 *iopte = iopte_offset(iopgd, da); |
715 | |
716 | bytes = IOPTE_SIZE; |
717 | if (*iopte & IOPTE_LARGE) { |
718 | nent *= 16; |
719 | /* rewind to the 1st entry */ |
720 | iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); |
721 | } |
722 | bytes *= nent; |
723 | memset(iopte, 0, nent * sizeof(*iopte)); |
724 | pt_dma = iopgd_page_paddr(iopgd); |
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
726 | |
727 | /* |
728 | * do table walk to check if this table is necessary or not |
729 | */ |
730 | iopte = iopte_offset(iopgd, 0); |
731 | for (i = 0; i < PTRS_PER_IOPTE; i++) |
732 | if (iopte[i]) |
733 | goto out; |
734 | |
		iopte_free(obj, iopte, true);
736 | nent = 1; /* for the next L1 entry */ |
737 | } else { |
738 | bytes = IOPGD_SIZE; |
739 | if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { |
740 | nent *= 16; |
741 | /* rewind to the 1st entry */ |
742 | iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); |
743 | } |
744 | bytes *= nent; |
745 | } |
746 | memset(iopgd, 0, nent * sizeof(*iopgd)); |
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
748 | out: |
749 | return bytes; |
750 | } |
751 | |
752 | /** |
753 | * iopgtable_clear_entry - Remove an iommu pte entry |
754 | * @obj: target iommu |
755 | * @da: iommu device virtual address |
756 | **/ |
757 | static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) |
758 | { |
759 | size_t bytes; |
760 | |
	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);
767 | |
768 | return bytes; |
769 | } |
770 | |
771 | static void iopgtable_clear_entry_all(struct omap_iommu *obj) |
772 | { |
773 | unsigned long offset; |
774 | int i; |
775 | |
	spin_lock(&obj->page_table_lock);
777 | |
778 | for (i = 0; i < PTRS_PER_IOPGD; i++) { |
779 | u32 da; |
780 | u32 *iopgd; |
781 | |
782 | da = i << IOPGD_SHIFT; |
783 | iopgd = iopgd_offset(obj, da); |
784 | offset = iopgd_index(da) * sizeof(da); |
785 | |
786 | if (!*iopgd) |
787 | continue; |
788 | |
789 | if (iopgd_is_table(*iopgd)) |
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
799 | } |
800 | |
801 | /* |
802 | * Device IOMMU generic operations |
803 | */ |
804 | static irqreturn_t iommu_fault_handler(int irq, void *data) |
805 | { |
806 | u32 da, errs; |
807 | u32 *iopgd, *iopte; |
808 | struct omap_iommu *obj = data; |
809 | struct iommu_domain *domain = obj->domain; |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
837 | |
838 | return IRQ_NONE; |
839 | } |
840 | |
841 | /** |
842 | * omap_iommu_attach() - attach iommu device to an iommu domain |
843 | * @obj: target omap iommu device |
844 | * @iopgd: page table |
845 | **/ |
846 | static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) |
847 | { |
848 | int err; |
849 | |
	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);
874 | |
875 | return err; |
876 | } |
877 | |
878 | /** |
879 | * omap_iommu_detach - release iommu device |
880 | * @obj: target iommu |
881 | **/ |
882 | static void omap_iommu_detach(struct omap_iommu *obj) |
883 | { |
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
898 | } |
899 | |
900 | static void omap_iommu_save_tlb_entries(struct omap_iommu *obj) |
901 | { |
902 | struct iotlb_lock lock; |
903 | struct cr_regs cr; |
904 | struct cr_regs *tmp; |
905 | int i; |
906 | |
907 | /* check if there are any locked tlbs to save */ |
	iotlb_lock_get(obj, &lock);
909 | obj->num_cr_ctx = lock.base; |
910 | if (!obj->num_cr_ctx) |
911 | return; |
912 | |
913 | tmp = obj->cr_ctx; |
914 | for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr) |
		*tmp++ = cr;
916 | } |
917 | |
918 | static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj) |
919 | { |
920 | struct iotlb_lock l; |
921 | struct cr_regs *tmp; |
922 | int i; |
923 | |
924 | /* no locked tlbs to restore */ |
925 | if (!obj->num_cr_ctx) |
926 | return; |
927 | |
928 | l.base = 0; |
929 | tmp = obj->cr_ctx; |
930 | for (i = 0; i < obj->num_cr_ctx; i++, tmp++) { |
931 | l.vict = i; |
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
938 | } |
939 | |
940 | /** |
941 | * omap_iommu_domain_deactivate - deactivate attached iommu devices |
942 | * @domain: iommu domain attached to the target iommu device |
943 | * |
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they have been idled and
 * have suspended all activity. System Suspend will leverage the PM
 * driver late callbacks.
948 | **/ |
949 | int omap_iommu_domain_deactivate(struct iommu_domain *domain) |
950 | { |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
952 | struct omap_iommu_device *iommu; |
953 | struct omap_iommu *oiommu; |
954 | int i; |
955 | |
956 | if (!omap_domain->dev) |
957 | return 0; |
958 | |
959 | iommu = omap_domain->iommus; |
960 | iommu += (omap_domain->num_iommus - 1); |
961 | for (i = 0; i < omap_domain->num_iommus; i++, iommu--) { |
962 | oiommu = iommu->iommu_dev; |
		pm_runtime_put_sync(oiommu->dev);
964 | } |
965 | |
966 | return 0; |
967 | } |
968 | EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate); |
969 | |
970 | /** |
971 | * omap_iommu_domain_activate - activate attached iommu devices |
972 | * @domain: iommu domain attached to the target iommu device |
973 | * |
974 | * This API allows the client devices of IOMMU devices to resume the |
975 | * IOMMUs they control at runtime, before they can resume operations. |
976 | * System Resume will leverage the PM driver late callbacks. |
977 | **/ |
978 | int omap_iommu_domain_activate(struct iommu_domain *domain) |
979 | { |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
981 | struct omap_iommu_device *iommu; |
982 | struct omap_iommu *oiommu; |
983 | int i; |
984 | |
985 | if (!omap_domain->dev) |
986 | return 0; |
987 | |
988 | iommu = omap_domain->iommus; |
989 | for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { |
990 | oiommu = iommu->iommu_dev; |
		pm_runtime_get_sync(oiommu->dev);
992 | } |
993 | |
994 | return 0; |
995 | } |
996 | EXPORT_SYMBOL_GPL(omap_iommu_domain_activate); |
997 | |
998 | /** |
999 | * omap_iommu_runtime_suspend - disable an iommu device |
1000 | * @dev: iommu device |
1001 | * |
1002 | * This function performs all that is necessary to disable an |
1003 | * IOMMU device, either during final detachment from a client |
1004 | * device, or during system/runtime suspend of the device. This |
1005 | * includes programming all the appropriate IOMMU registers, and |
1006 | * managing the associated omap_hwmod's state and the device's |
1007 | * reset line. This function also saves the context of any |
1008 | * locked TLBs if suspending. |
1009 | **/ |
1010 | static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev) |
1011 | { |
1012 | struct platform_device *pdev = to_platform_device(dev); |
1013 | struct iommu_platform_data *pdata = dev_get_platdata(dev); |
1014 | struct omap_iommu *obj = to_iommu(dev); |
1015 | int ret; |
1016 | |
1017 | /* save the TLBs only during suspend, and not for power down */ |
1018 | if (obj->domain && obj->iopgd) |
1019 | omap_iommu_save_tlb_entries(obj); |
1020 | |
1021 | omap2_iommu_disable(obj); |
1022 | |
1023 | if (pdata && pdata->device_idle) |
1024 | pdata->device_idle(pdev); |
1025 | |
1026 | if (pdata && pdata->assert_reset) |
1027 | pdata->assert_reset(pdev, pdata->reset_name); |
1028 | |
1029 | if (pdata && pdata->set_pwrdm_constraint) { |
1030 | ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst); |
1031 | if (ret) { |
1032 | dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n" , |
1033 | ret); |
1034 | } |
1035 | } |
1036 | |
1037 | return 0; |
1038 | } |
1039 | |
1040 | /** |
1041 | * omap_iommu_runtime_resume - enable an iommu device |
1042 | * @dev: iommu device |
1043 | * |
1044 | * This function performs all that is necessary to enable an |
1045 | * IOMMU device, either during initial attachment to a client |
1046 | * device, or during system/runtime resume of the device. This |
1047 | * includes programming all the appropriate IOMMU registers, and |
1048 | * managing the associated omap_hwmod's state and the device's |
1049 | * reset line. The function also restores any locked TLBs if |
1050 | * resuming after a suspend. |
1051 | **/ |
1052 | static __maybe_unused int omap_iommu_runtime_resume(struct device *dev) |
1053 | { |
1054 | struct platform_device *pdev = to_platform_device(dev); |
1055 | struct iommu_platform_data *pdata = dev_get_platdata(dev); |
1056 | struct omap_iommu *obj = to_iommu(dev); |
1057 | int ret = 0; |
1058 | |
1059 | if (pdata && pdata->set_pwrdm_constraint) { |
1060 | ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst); |
1061 | if (ret) { |
1062 | dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n" , |
1063 | ret); |
1064 | } |
1065 | } |
1066 | |
1067 | if (pdata && pdata->deassert_reset) { |
1068 | ret = pdata->deassert_reset(pdev, pdata->reset_name); |
1069 | if (ret) { |
1070 | dev_err(dev, "deassert_reset failed: %d\n" , ret); |
1071 | return ret; |
1072 | } |
1073 | } |
1074 | |
1075 | if (pdata && pdata->device_enable) |
1076 | pdata->device_enable(pdev); |
1077 | |
1078 | /* restore the TLBs only during resume, and not for power up */ |
1079 | if (obj->domain) |
1080 | omap_iommu_restore_tlb_entries(obj); |
1081 | |
1082 | ret = omap2_iommu_enable(obj); |
1083 | |
1084 | return ret; |
1085 | } |
1086 | |
1087 | /** |
1088 | * omap_iommu_prepare - prepare() dev_pm_ops implementation |
1089 | * @dev: iommu device |
1090 | * |
1091 | * This function performs the necessary checks to determine if the IOMMU |
1092 | * device needs suspending or not. The function checks if the runtime_pm |
1093 | * status of the device is suspended, and returns 1 in that case. This |
 * causes the PM core to skip invoking any of the Sleep PM callbacks
1095 | * (suspend, suspend_late, resume, resume_early etc). |
1096 | */ |
1097 | static int omap_iommu_prepare(struct device *dev) |
1098 | { |
1099 | if (pm_runtime_status_suspended(dev)) |
1100 | return 1; |
1101 | return 0; |
1102 | } |
1103 | |
1104 | static bool omap_iommu_can_register(struct platform_device *pdev) |
1105 | { |
1106 | struct device_node *np = pdev->dev.of_node; |
1107 | |
	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
1109 | return true; |
1110 | |
1111 | /* |
1112 | * restrict IOMMU core registration only for processor-port MDMA MMUs |
1113 | * on DRA7 DSPs |
1114 | */ |
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
1117 | return true; |
1118 | |
1119 | return false; |
1120 | } |
1121 | |
1122 | static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev, |
1123 | struct omap_iommu *obj) |
1124 | { |
1125 | struct device_node *np = pdev->dev.of_node; |
1126 | int ret; |
1127 | |
	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}
1154 | |
1155 | return 0; |
1156 | } |
1157 | |
1158 | /* |
1159 | * OMAP Device MMU(IOMMU) detection |
1160 | */ |
1161 | static int omap_iommu_probe(struct platform_device *pdev) |
1162 | { |
1163 | int err = -ENODEV; |
1164 | int irq; |
1165 | struct omap_iommu *obj; |
1166 | struct resource *res; |
1167 | struct device_node *of = pdev->dev.of_node; |
1168 | |
1169 | if (!of) { |
1170 | pr_err("%s: only DT-based devices are supported\n" , __func__); |
1171 | return -ENODEV; |
1172 | } |
1173 | |
	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
1175 | if (!obj) |
1176 | return -ENOMEM; |
1177 | |
1178 | /* |
1179 | * self-manage the ordering dependencies between omap_device_enable/idle |
1180 | * and omap_device_assert/deassert_hardreset API |
1181 | */ |
1182 | if (pdev->dev.pm_domain) { |
1183 | dev_dbg(&pdev->dev, "device pm_domain is being reset\n" ); |
1184 | pdev->dev.pm_domain = NULL; |
1185 | } |
1186 | |
	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
1190 | if (err && err != -EINVAL) |
1191 | return err; |
1192 | if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) |
1193 | return -EINVAL; |
	if (of_property_read_bool(of, "ti,iommu-bus-err-back"))
1195 | obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; |
1196 | |
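	/*
	 * the register save/restore context area lives in the extra
	 * MMU_REG_SIZE bytes allocated right after obj above
	 */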
1197 | obj->dev = &pdev->dev; |
1198 | obj->ctx = (void *)obj + sizeof(*obj); |
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
1201 | GFP_KERNEL); |
1202 | if (!obj->cr_ctx) |
1203 | return -ENOMEM; |
1204 | |
1205 | spin_lock_init(&obj->iommu_lock); |
1206 | spin_lock_init(&obj->page_table_lock); |
1207 | |
1208 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);
1212 | |
1213 | err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj); |
1214 | if (err) |
1215 | return err; |
1216 | |
1217 | irq = platform_get_irq(pdev, 0); |
1218 | if (irq < 0) |
1219 | return -ENODEV; |
1220 | |
	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);
1226 | |
1227 | if (omap_iommu_can_register(pdev)) { |
1228 | obj->group = iommu_group_alloc(); |
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
1238 | if (err) |
1239 | goto out_sysfs; |
1240 | } |
1241 | |
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/* Re-probe bus to probe device attached to this IOMMU */
	bus_iommu_probe(&platform_bus_type);
1250 | |
1251 | return 0; |
1252 | |
1253 | out_sysfs: |
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
1257 | return err; |
1258 | } |
1259 | |
1260 | static void omap_iommu_remove(struct platform_device *pdev) |
1261 | { |
1262 | struct omap_iommu *obj = platform_get_drvdata(pdev); |
1263 | |
1264 | if (obj->group) { |
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
1277 | } |
1278 | |
1279 | static const struct dev_pm_ops omap_iommu_pm_ops = { |
1280 | .prepare = omap_iommu_prepare, |
1281 | SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
1282 | pm_runtime_force_resume) |
1283 | SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend, |
1284 | omap_iommu_runtime_resume, NULL) |
1285 | }; |
1286 | |
1287 | static const struct of_device_id omap_iommu_of_match[] = { |
1288 | { .compatible = "ti,omap2-iommu" }, |
1289 | { .compatible = "ti,omap4-iommu" }, |
1290 | { .compatible = "ti,dra7-iommu" }, |
1291 | { .compatible = "ti,dra7-dsp-iommu" }, |
1292 | {}, |
1293 | }; |
1294 | |
1295 | static struct platform_driver omap_iommu_driver = { |
1296 | .probe = omap_iommu_probe, |
1297 | .remove_new = omap_iommu_remove, |
1298 | .driver = { |
1299 | .name = "omap-iommu" , |
1300 | .pm = &omap_iommu_pm_ops, |
1301 | .of_match_table = of_match_ptr(omap_iommu_of_match), |
1302 | }, |
1303 | }; |
1304 | |
1305 | static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) |
1306 | { |
1307 | memset(e, 0, sizeof(*e)); |
1308 | |
1309 | e->da = da; |
1310 | e->pa = pa; |
1311 | e->valid = MMU_CAM_V; |
1312 | e->pgsz = pgsz; |
1313 | e->endian = MMU_RAM_ENDIAN_LITTLE; |
1314 | e->elsz = MMU_RAM_ELSZ_8; |
1315 | e->mixed = 0; |
1316 | |
1317 | return iopgsz_to_bytes(e->pgsz); |
1318 | } |
1319 | |
1320 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, |
1321 | phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) |
1322 | { |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1324 | struct device *dev = omap_domain->dev; |
1325 | struct omap_iommu_device *iommu; |
1326 | struct omap_iommu *oiommu; |
1327 | struct iotlb_entry e; |
1328 | int omap_pgsz; |
	int ret = -EINVAL;
1330 | int i; |
1331 | |
1332 | omap_pgsz = bytes_to_iopgsz(bytes); |
1333 | if (omap_pgsz < 0) { |
1334 | dev_err(dev, "invalid size to map: %zu\n" , bytes); |
1335 | return -EINVAL; |
1336 | } |
1337 | |
1338 | dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n" , da, &pa, bytes); |
1339 | |
1340 | iotlb_init_entry(e: &e, da, pa, pgsz: omap_pgsz); |
1341 | |
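	/* mirror the entry into every IOMMU instance attached to this domain */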
1342 | iommu = omap_domain->iommus; |
1343 | for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { |
1344 | oiommu = iommu->iommu_dev; |
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
1348 | ret); |
1349 | break; |
1350 | } |
1351 | } |
1352 | |
1353 | if (ret) { |
1354 | while (i--) { |
1355 | iommu--; |
1356 | oiommu = iommu->iommu_dev; |
			iopgtable_clear_entry(oiommu, da);
1358 | } |
1359 | } |
1360 | |
1361 | return ret; |
1362 | } |
1363 | |
1364 | static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, |
1365 | size_t size, struct iommu_iotlb_gather *gather) |
1366 | { |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1368 | struct device *dev = omap_domain->dev; |
1369 | struct omap_iommu_device *iommu; |
1370 | struct omap_iommu *oiommu; |
1371 | bool error = false; |
1372 | size_t bytes = 0; |
1373 | int i; |
1374 | |
1375 | dev_dbg(dev, "unmapping da 0x%lx size %zu\n" , da, size); |
1376 | |
1377 | iommu = omap_domain->iommus; |
1378 | for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { |
1379 | oiommu = iommu->iommu_dev; |
		bytes = iopgtable_clear_entry(oiommu, da);
1381 | if (!bytes) |
1382 | error = true; |
1383 | } |
1384 | |
1385 | /* |
1386 | * simplify return - we are only checking if any of the iommus |
1387 | * reported an error, but not if all of them are unmapping the |
1388 | * same number of entries. This should not occur due to the |
1389 | * mirror programming. |
1390 | */ |
1391 | return error ? 0 : bytes; |
1392 | } |
1393 | |
1394 | static int omap_iommu_count(struct device *dev) |
1395 | { |
1396 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
1397 | int count = 0; |
1398 | |
1399 | while (arch_data->iommu_dev) { |
1400 | count++; |
1401 | arch_data++; |
1402 | } |
1403 | |
1404 | return count; |
1405 | } |
1406 | |
1407 | /* caller should call cleanup if this function fails */ |
1408 | static int omap_iommu_attach_init(struct device *dev, |
1409 | struct omap_iommu_domain *odomain) |
1410 | { |
1411 | struct omap_iommu_device *iommu; |
1412 | int i; |
1413 | |
1414 | odomain->num_iommus = omap_iommu_count(dev); |
1415 | if (!odomain->num_iommus) |
1416 | return -ENODEV; |
1417 | |
	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
1419 | GFP_ATOMIC); |
1420 | if (!odomain->iommus) |
1421 | return -ENOMEM; |
1422 | |
1423 | iommu = odomain->iommus; |
1424 | for (i = 0; i < odomain->num_iommus; i++, iommu++) { |
1425 | iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC); |
1426 | if (!iommu->pgtable) |
1427 | return -ENOMEM; |
1428 | |
1429 | /* |
1430 | * should never fail, but please keep this around to ensure |
1431 | * we keep the hardware happy |
1432 | */ |
1433 | if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable, |
1434 | IOPGD_TABLE_SIZE))) |
1435 | return -EINVAL; |
1436 | } |
1437 | |
1438 | return 0; |
1439 | } |
1440 | |
1441 | static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain) |
1442 | { |
1443 | int i; |
1444 | struct omap_iommu_device *iommu = odomain->iommus; |
1445 | |
1446 | for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++) |
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
1450 | odomain->num_iommus = 0; |
1451 | odomain->iommus = NULL; |
1452 | } |
1453 | |
1454 | static int |
1455 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
1456 | { |
1457 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1459 | struct omap_iommu_device *iommu; |
1460 | struct omap_iommu *oiommu; |
1461 | int ret = 0; |
1462 | int i; |
1463 | |
1464 | if (!arch_data || !arch_data->iommu_dev) { |
1465 | dev_err(dev, "device doesn't have an associated iommu\n" ); |
1466 | return -ENODEV; |
1467 | } |
1468 | |
	spin_lock(&omap_domain->lock);
1470 | |
1471 | /* only a single client device can be attached to a domain */ |
1472 | if (omap_domain->dev) { |
1473 | dev_err(dev, "iommu domain is already attached\n" ); |
1474 | ret = -EINVAL; |
1475 | goto out; |
1476 | } |
1477 | |
	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
1481 | ret); |
1482 | goto init_fail; |
1483 | } |
1484 | |
1485 | iommu = omap_domain->iommus; |
1486 | for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) { |
1487 | /* configure and enable the omap iommu */ |
1488 | oiommu = arch_data->iommu_dev; |
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
1492 | goto attach_fail; |
1493 | } |
1494 | |
1495 | oiommu->domain = domain; |
1496 | iommu->iommu_dev = oiommu; |
1497 | } |
1498 | |
1499 | omap_domain->dev = dev; |
1500 | |
1501 | goto out; |
1502 | |
1503 | attach_fail: |
1504 | while (i--) { |
1505 | iommu--; |
1506 | arch_data--; |
1507 | oiommu = iommu->iommu_dev; |
		omap_iommu_detach(oiommu);
1509 | iommu->iommu_dev = NULL; |
1510 | oiommu->domain = NULL; |
1511 | } |
1512 | init_fail: |
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
1516 | return ret; |
1517 | } |
1518 | |
1519 | static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, |
1520 | struct device *dev) |
1521 | { |
1522 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
1523 | struct omap_iommu_device *iommu = omap_domain->iommus; |
1524 | struct omap_iommu *oiommu; |
1525 | int i; |
1526 | |
1527 | if (!omap_domain->dev) { |
1528 | dev_err(dev, "domain has no attached device\n" ); |
1529 | return; |
1530 | } |
1531 | |
1532 | /* only a single device is supported per domain for now */ |
1533 | if (omap_domain->dev != dev) { |
1534 | dev_err(dev, "invalid attached device\n" ); |
1535 | return; |
1536 | } |
1537 | |
1538 | /* |
1539 | * cleanup in the reverse order of attachment - this addresses |
1540 | * any h/w dependencies between multiple instances, if any |
1541 | */ |
1542 | iommu += (omap_domain->num_iommus - 1); |
1543 | arch_data += (omap_domain->num_iommus - 1); |
1544 | for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) { |
1545 | oiommu = iommu->iommu_dev; |
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
1549 | iommu->iommu_dev = NULL; |
1550 | oiommu->domain = NULL; |
1551 | } |
1552 | |
	omap_iommu_detach_fini(omap_domain);
1554 | |
1555 | omap_domain->dev = NULL; |
1556 | } |
1557 | |
1558 | static void omap_iommu_set_platform_dma(struct device *dev) |
1559 | { |
1560 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
1566 | } |
1567 | |
1568 | static struct iommu_domain *omap_iommu_domain_alloc(unsigned type) |
1569 | { |
1570 | struct omap_iommu_domain *omap_domain; |
1571 | |
1572 | if (type != IOMMU_DOMAIN_UNMANAGED) |
1573 | return NULL; |
1574 | |
	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1576 | if (!omap_domain) |
1577 | return NULL; |
1578 | |
1579 | spin_lock_init(&omap_domain->lock); |
1580 | |
1581 | omap_domain->domain.geometry.aperture_start = 0; |
1582 | omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1; |
1583 | omap_domain->domain.geometry.force_aperture = true; |
1584 | |
1585 | return &omap_domain->domain; |
1586 | } |
1587 | |
1588 | static void omap_iommu_domain_free(struct iommu_domain *domain) |
1589 | { |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * Is an iommu device still attached?
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
1600 | } |
1601 | |
1602 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, |
1603 | dma_addr_t da) |
1604 | { |
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1606 | struct omap_iommu_device *iommu = omap_domain->iommus; |
1607 | struct omap_iommu *oiommu = iommu->iommu_dev; |
1608 | struct device *dev = oiommu->dev; |
1609 | u32 *pgd, *pte; |
1610 | phys_addr_t ret = 0; |
1611 | |
1612 | /* |
1613 | * all the iommus within the domain will have identical programming, |
1614 | * so perform the lookup using just the first iommu |
1615 | */ |
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}
1635 | |
1636 | return ret; |
1637 | } |
1638 | |
1639 | static struct iommu_device *omap_iommu_probe_device(struct device *dev) |
1640 | { |
1641 | struct omap_iommu_arch_data *arch_data, *tmp; |
1642 | struct platform_device *pdev; |
1643 | struct omap_iommu *oiommu; |
1644 | struct device_node *np; |
1645 | int num_iommus, i; |
1646 | |
1647 | /* |
1648 | * Allocate the per-device iommu structure for DT-based devices. |
1649 | * |
1650 | * TODO: Simplify this when removing non-DT support completely from the |
1651 | * IOMMU users. |
1652 | */ |
1653 | if (!dev->of_node) |
		return ERR_PTR(-ENODEV);
1655 | |
1656 | /* |
1657 | * retrieve the count of IOMMU nodes using phandle size as element size |
1658 | * since #iommu-cells = 0 for OMAP |
1659 | */ |
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}
1675 | |
1676 | pdev = of_find_device_by_node(np); |
1677 | if (!pdev) { |
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}
1689 | |
1690 | tmp->iommu_dev = oiommu; |
1691 | tmp->dev = &pdev->dev; |
1692 | |
		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);
1697 | |
1698 | /* |
1699 | * use the first IOMMU alone for the sysfs device linking. |
1700 | * TODO: Evaluate if a single iommu_group needs to be |
1701 | * maintained for both IOMMUs |
1702 | */ |
1703 | oiommu = arch_data->iommu_dev; |
1704 | |
1705 | return &oiommu->iommu; |
1706 | } |
1707 | |
1708 | static void omap_iommu_release_device(struct device *dev) |
1709 | { |
1710 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
1711 | |
1712 | if (!dev->of_node || !arch_data) |
1713 | return; |
1714 | |
1715 | dev_iommu_priv_set(dev, NULL); |
	kfree(arch_data);
1718 | } |
1719 | |
1720 | static struct iommu_group *omap_iommu_device_group(struct device *dev) |
1721 | { |
1722 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (!arch_data)
		return ERR_PTR(-ENODEV);

	if (arch_data->iommu_dev)
		group = iommu_group_ref_get(arch_data->iommu_dev->group);
1730 | |
1731 | return group; |
1732 | } |
1733 | |
1734 | static const struct iommu_ops omap_iommu_ops = { |
1735 | .domain_alloc = omap_iommu_domain_alloc, |
1736 | .probe_device = omap_iommu_probe_device, |
1737 | .release_device = omap_iommu_release_device, |
1738 | .device_group = omap_iommu_device_group, |
1739 | .set_platform_dma_ops = omap_iommu_set_platform_dma, |
1740 | .pgsize_bitmap = OMAP_IOMMU_PGSIZES, |
1741 | .default_domain_ops = &(const struct iommu_domain_ops) { |
1742 | .attach_dev = omap_iommu_attach_dev, |
1743 | .map = omap_iommu_map, |
1744 | .unmap = omap_iommu_unmap, |
1745 | .iova_to_phys = omap_iommu_iova_to_phys, |
1746 | .free = omap_iommu_domain_free, |
1747 | } |
1748 | }; |
1749 | |
1750 | static int __init omap_iommu_init(void) |
1751 | { |
1752 | struct kmem_cache *p; |
1753 | const slab_flags_t flags = SLAB_HWCACHE_ALIGN; |
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1765 | NULL); |
1766 | if (!p) |
1767 | return -ENOMEM; |
1768 | iopte_cachep = p; |
1769 | |
1770 | omap_iommu_debugfs_init(); |
1771 | |
1772 | ret = platform_driver_register(&omap_iommu_driver); |
1773 | if (ret) { |
1774 | pr_err("%s: failed to register driver\n" , __func__); |
1775 | goto fail_driver; |
1776 | } |
1777 | |
1778 | return 0; |
1779 | |
1780 | fail_driver: |
	kmem_cache_destroy(iopte_cachep);
1782 | return ret; |
1783 | } |
1784 | subsys_initcall(omap_iommu_init); |
1785 | /* must be ready before omap3isp is probed */ |
1786 | |