// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		16U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		64U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT	100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

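/*
 * Quick sanity check of the layout above (illustrative arithmetic, not taken
 * from a datasheet): IMUCTR(0) = 0x0300 and IMUASID(0) = 0x0308, while
 * IMUCTR(33) falls in the R-Car Gen3 extension range at 0x0600 + 16 = 0x0610.
 * On R-Car Gen4 parts every uTLB register is additionally offset by
 * utlb_offset_base (0x3000) in ipmmu_utlb_reg() below.
 */
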
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

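/*
 * Context registers live at ctx_offset_base, one block of ctx_offset_stride
 * bytes per context. As an illustration, on R-Car Gen4 (base 0x10000,
 * stride 0x1040) context 2's IMCTR is at 0x10000 + 2 * 0x1040 = 0x12080.
 * The 0x800 - 8 * 0x40 adjustment below relocates contexts 8-15 into a
 * second register block; it is only reachable on parts that declare more
 * than eight contexts.
 */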
static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	unsigned int base = mmu->features->ctx_offset_base;

	if (context_id > 7)
		base += 0x800 - 8 * 0x40;

	return base + context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete. */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	u32 val;

	if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val,
				     !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT,
				     false, domain, IMCTR))
		dev_err_ratelimited(domain->mmu->dev,
				    "TLB sync timed out -- MMU may be deadlocked\n");
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

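/*
 * The hardware does not appear to provide a range-based invalidation
 * command, so the walk-flush callback falls back to a full TLB invalidation
 * and ignores its iova/size/granule arguments. Conservative but correct.
 */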
static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before
	 * clearing the status, otherwise it will read as 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t pgsize, size_t pgcount,
		     int prot, gfp_t gfp, size_t *mapped)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
				      prot, gfp, mapped);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t pgsize, size_t pgcount,
			  struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_needs_opt_in[] = {
	{ .family = "R-Car Gen3", },
	{ .family = "R-Car Gen4", },
	{ .family = "RZ/G2", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_denylist[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a7795", .revision = "ES2.*" },
	{ .soc_id = "r8a7796", },
	{ /* sentinel */ }
};

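/*
 * Entries are dev_name() strings, i.e. "<unit-address>.<node-name>" as
 * generated from DT. The four addresses below should correspond to the
 * SDHI/MMC controllers of R-Car Gen3 class SoCs (an assumption based on
 * their customary unit addresses; check the SoC device tree to confirm).
 */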
static const char * const devices_allowlist[] = {
	"ee100000.mmc",
	"ee120000.mmc",
	"ee140000.mmc",
	"ee160000.mmc"
};

static bool ipmmu_device_is_allowed(struct device *dev)
{
	unsigned int i;

	/*
	 * R-Car Gen3/4 and RZ/G2 use the allow list to opt in devices.
	 * For all other SoCs this function always returns true.
	 */
	if (!soc_device_match(soc_needs_opt_in))
		return true;

	/* Check whether this SoC can use the IPMMU correctly or not. */
	if (soc_device_match(soc_denylist))
		return false;

	/* Check whether this device is a PCI device. */
	if (dev_is_pci(dev))
		return true;

	/* Check whether this device can work with the IPMMU. */
	for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
		if (!strcmp(dev_name(dev), devices_allowlist[i]))
			return true;
	}

	/* Otherwise, do not allow use of the IPMMU. */
	return false;
}

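/*
 * For reference, a bus master opts in to translation by referencing an IPMMU
 * instance in its DT node; the single specifier cell is the uTLB index that
 * of_xlate() below records in the fwspec. A minimal sketch (labels and the
 * uTLB number are illustrative):
 *
 *	mmc@ee100000 {
 *		...
 *		iommus = <&ipmmu_ds1 32>;
 *	};
 */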
static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_device_is_allowed(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times. */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		unsigned int utlb = fwspec->ids[i];

		ipmmu_imuctr_write(mmu, utlb, 0);
		mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
	}

	arm_iommu_release_mapping(mmu->mapping);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = ipmmu_attach_device,
		.map_pages = ipmmu_map,
		.unmap_pages = ipmmu_unmap,
		.flush_iotlb_all = ipmmu_flush_iotlb_all,
		.iotlb_sync = ipmmu_iotlb_sync,
		.iova_to_phys = ipmmu_iova_to_phys,
		.free = ipmmu_domain_free,
	}
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 16,
	.num_utlbs = 64,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0x10000,
	.ctx_offset_stride = 0x1040,
	.utlb_offset_base = 0x3000,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77980",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a779a0",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		.compatible = "renesas,rcar-gen4-ipmmu-vmsa",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the register base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of the has_cache_leaf_nodes feature or of the
	 * renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main"))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs. */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
		if (ret)
			return ret;
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static void ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);
}

#ifdef CONFIG_PM_SLEEP
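/*
 * The IPMMU is assumed to lose its register state while the SoC is suspended,
 * so the resume hook rebuilds everything from software bookkeeping: the root
 * MMU is reset, each allocated context is reprogrammed from its domain, and
 * each uTLB recorded in utlb_ctx[] is re-enabled. No suspend hook is needed
 * as no state has to be saved.
 */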
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts. */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs. */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove_new = ipmmu_remove,
};
builtin_platform_driver(ipmmu_driver);
1145 | |