// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"	mrc	" #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

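/*
 * Enable the register-interface clock (pclk) and, when one is
 * present, the core clock before touching IOMMU registers.
 */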
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

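/*
 * Put the global registers and all ncb context banks into a known,
 * disabled state.
 */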
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

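/*
 * Invalidate the entire TLB of every context bank attached to this
 * domain, on each IOMMU in the domain's attach list.
 */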
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

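/*
 * Invalidate the TLB one granule at a time: each virtual address is
 * masked with TLBIVA_VA and tagged with the context bank's ASID
 * before being written to the TLBIVA register.
 */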
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

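/* Claim the first free context bank number in the allocation bitmap. */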
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

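/*
 * Route each of the master's MIDs (stream IDs) to its context bank
 * and set up VMID/ASID tagging for that bank.
 */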
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

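/*
 * Bring a context bank online: reset it, install the page table
 * produced by io-pgtable (TTBR0/TTBCR/PRRR/NMRR) and enable the MMU.
 */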
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

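/*
 * Allocate the domain's ARMv7 short-descriptor page table and
 * propagate the resulting page-size bitmap back into msm_iommu_ops.
 */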
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

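/*
 * Allocate and program a context bank for every master that belongs
 * to this device, on each IOMMU that serves it.
 */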
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_set_platform_dma(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
				   GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	size_t ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

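/*
 * Resolve an IOVA by letting the hardware walk the tables: write the
 * address to the V2P register and read the physical address back
 * from PAR.
 */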
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR = %08x    PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x    TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x    ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static int insert_iommu_master(struct device *dev,
			       struct msm_iommu_dev **iommu,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return 0;
		}

	master->mids[master->num_mids++] = spec->args[0];
	return 0;
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	ret = insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

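/*
 * Context fault handler: report the faulting context bank, dump its
 * fault registers and clear the fault status.
 */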
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_alloc = msm_iommu_domain_alloc,
	.probe_device = msm_iommu_probe_device,
	.device_group = generic_device_group,
	.set_platform_dma_ops = msm_iommu_set_platform_dma,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = msm_iommu_attach_dev,
		.map_pages = msm_iommu_map,
		.unmap_pages = msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the tlb sync operation is implicitly
		 * taken care when the iommu client does a writel before
		 * kick starting the other master.
		 */
		.iotlb_sync = NULL,
		.iotlb_sync_map = msm_iommu_sync_map,
		.iova_to_phys = msm_iommu_iova_to_phys,
		.free = msm_iommu_domain_free,
	}
};

static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");

	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

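	/*
	 * Basic sanity check: run a dummy VA-to-PA translation on context
	 * bank 0; a PAR value of zero means the IOMMU did not respond.
	 */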
	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static void msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove_new = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);