1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2015-2016 MediaTek Inc. |
4 | * Author: Yong Wu <yong.wu@mediatek.com> |
5 | */ |
6 | #include <linux/arm-smccc.h> |
7 | #include <linux/bitfield.h> |
8 | #include <linux/bug.h> |
9 | #include <linux/clk.h> |
10 | #include <linux/component.h> |
11 | #include <linux/device.h> |
12 | #include <linux/err.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/io.h> |
15 | #include <linux/iommu.h> |
16 | #include <linux/iopoll.h> |
17 | #include <linux/io-pgtable.h> |
18 | #include <linux/list.h> |
19 | #include <linux/mfd/syscon.h> |
20 | #include <linux/module.h> |
21 | #include <linux/of_address.h> |
22 | #include <linux/of_irq.h> |
23 | #include <linux/of_platform.h> |
24 | #include <linux/pci.h> |
25 | #include <linux/platform_device.h> |
26 | #include <linux/pm_runtime.h> |
27 | #include <linux/regmap.h> |
28 | #include <linux/slab.h> |
29 | #include <linux/spinlock.h> |
30 | #include <linux/soc/mediatek/infracfg.h> |
31 | #include <linux/soc/mediatek/mtk_sip_svc.h> |
32 | #include <asm/barrier.h> |
33 | #include <soc/mediatek/smi.h> |
34 | |
35 | #include <dt-bindings/memory/mtk-memory-port.h> |
36 | |
37 | #define REG_MMU_PT_BASE_ADDR 0x000 |
38 | |
39 | #define REG_MMU_INVALIDATE 0x020 |
40 | #define F_ALL_INVLD 0x2 |
41 | #define F_MMU_INV_RANGE 0x1 |
42 | |
43 | #define REG_MMU_INVLD_START_A 0x024 |
44 | #define REG_MMU_INVLD_END_A 0x028 |
45 | |
46 | #define REG_MMU_INV_SEL_GEN2 0x02c |
47 | #define REG_MMU_INV_SEL_GEN1 0x038 |
48 | #define F_INVLD_EN0 BIT(0) |
49 | #define F_INVLD_EN1 BIT(1) |
50 | |
51 | #define REG_MMU_MISC_CTRL 0x048 |
52 | #define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17)) |
53 | #define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19)) |
54 | |
55 | #define REG_MMU_DCM_DIS 0x050 |
56 | #define F_MMU_DCM BIT(8) |
57 | |
58 | #define REG_MMU_WR_LEN_CTRL 0x054 |
59 | #define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21)) |
60 | |
61 | #define REG_MMU_CTRL_REG 0x110 |
62 | #define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4) |
63 | #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) |
64 | #define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5) |
65 | |
66 | #define REG_MMU_IVRP_PADDR 0x114 |
67 | |
68 | #define REG_MMU_VLD_PA_RNG 0x118 |
69 | #define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA)) |
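/*
 * For example, F_MMU_VLD_PA_RNG(7, 4) encodes end-address bits [32:30] = 7 and
 * start-address bits [32:30] = 4, i.e. the 0x1_0000_0000 - 0x1_ffff_ffff window
 * that mtk_iommu_hw_init() programs in 4GB mode.
 */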
70 | |
71 | #define REG_MMU_INT_CONTROL0 0x120 |
72 | #define F_L2_MULIT_HIT_EN BIT(0) |
73 | #define F_TABLE_WALK_FAULT_INT_EN BIT(1) |
74 | #define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2) |
75 | #define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3) |
76 | #define F_PREFETCH_FIFO_ERR_INT_EN BIT(5) |
77 | #define F_MISS_FIFO_ERR_INT_EN BIT(6) |
78 | #define F_INT_CLR_BIT BIT(12) |
79 | |
80 | #define REG_MMU_INT_MAIN_CONTROL 0x124 |
81 | /* mmu0 | mmu1 */ |
82 | #define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7)) |
83 | #define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8)) |
84 | #define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9)) |
85 | #define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10)) |
86 | #define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11)) |
87 | #define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12)) |
88 | #define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13)) |
89 | |
90 | #define REG_MMU_CPE_DONE 0x12C |
91 | |
92 | #define REG_MMU_FAULT_ST1 0x134 |
93 | #define F_REG_MMU0_FAULT_MASK GENMASK(6, 0) |
94 | #define F_REG_MMU1_FAULT_MASK GENMASK(13, 7) |
95 | |
96 | #define REG_MMU0_FAULT_VA 0x13c |
97 | #define F_MMU_INVAL_VA_31_12_MASK GENMASK(31, 12) |
98 | #define F_MMU_INVAL_VA_34_32_MASK GENMASK(11, 9) |
99 | #define F_MMU_INVAL_PA_34_32_MASK GENMASK(8, 6) |
100 | #define F_MMU_FAULT_VA_WRITE_BIT BIT(1) |
101 | #define F_MMU_FAULT_VA_LAYER_BIT BIT(0) |
102 | |
103 | #define REG_MMU0_INVLD_PA 0x140 |
104 | #define REG_MMU1_FAULT_VA 0x144 |
105 | #define REG_MMU1_INVLD_PA 0x148 |
106 | #define REG_MMU0_INT_ID 0x150 |
107 | #define REG_MMU1_INT_ID 0x154 |
108 | #define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7) |
109 | #define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3) |
110 | #define F_MMU_INT_ID_COMM_ID_EXT(a) (((a) >> 10) & 0x7) |
111 | #define F_MMU_INT_ID_SUB_COMM_ID_EXT(a) (((a) >> 7) & 0x7) |
/* Macro for the 5-bit port ID field (default) */
113 | #define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) |
114 | #define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) |
/* Macro for the 6-bit port ID field */
116 | #define F_MMU_INT_ID_LARB_ID_WID_6(a) (((a) >> 8) & 0x7) |
117 | #define F_MMU_INT_ID_PORT_ID_WID_6(a) (((a) >> 2) & 0x3f) |
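/*
 * As derived from the shifts/masks above: in the default layout the port ID is
 * in bits [6:2] and the larb ID in bits [9:7]; with a 2-bit sub-common the
 * common ID is in bits [11:9] and the sub-common in bits [8:7], and with a
 * 3-bit sub-common the common ID is in bits [12:10] and the sub-common in
 * bits [9:7].
 */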
118 | |
119 | #define MTK_PROTECT_PA_ALIGN 256 |
120 | #define MTK_IOMMU_BANK_SZ 0x1000 |
121 | |
122 | #define PERICFG_IOMMU_1 0x714 |
123 | |
124 | #define HAS_4GB_MODE BIT(0) |
125 | /* HW will use the EMI clock if there isn't the "bclk". */ |
126 | #define HAS_BCLK BIT(1) |
127 | #define HAS_VLD_PA_RNG BIT(2) |
128 | #define RESET_AXI BIT(3) |
129 | #define OUT_ORDER_WR_EN BIT(4) |
130 | #define HAS_SUB_COMM_2BITS BIT(5) |
131 | #define HAS_SUB_COMM_3BITS BIT(6) |
132 | #define WR_THROT_EN BIT(7) |
133 | #define HAS_LEGACY_IVRP_PADDR BIT(8) |
134 | #define IOVA_34_EN BIT(9) |
135 | #define SHARE_PGTABLE BIT(10) /* 2 HW share pgtable */ |
136 | #define DCM_DISABLE BIT(11) |
137 | #define STD_AXI_MODE BIT(12) /* For non MM iommu */ |
138 | /* 2 bits: iommu type */ |
139 | #define MTK_IOMMU_TYPE_MM (0x0 << 13) |
140 | #define MTK_IOMMU_TYPE_INFRA (0x1 << 13) |
141 | #define MTK_IOMMU_TYPE_MASK (0x3 << 13) |
142 | /* PM and clock always on. e.g. infra iommu */ |
143 | #define PM_CLK_AO BIT(15) |
144 | #define IFA_IOMMU_PCIE_SUPPORT BIT(16) |
145 | #define PGTABLE_PA_35_EN BIT(17) |
146 | #define TF_PORT_TO_ADDR_MT8173 BIT(18) |
147 | #define INT_ID_PORT_WIDTH_6 BIT(19) |
148 | #define CFG_IFA_MASTER_IN_ATF BIT(20) |
149 | |
150 | #define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \ |
151 | ((((pdata)->flags) & (mask)) == (_x)) |
152 | |
153 | #define MTK_IOMMU_HAS_FLAG(pdata, _x) MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x) |
154 | #define MTK_IOMMU_IS_TYPE(pdata, _x) MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\ |
155 | MTK_IOMMU_TYPE_MASK) |
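/*
 * Note: MTK_IOMMU_HAS_FLAG(pdata, WR_THROT_EN) tests that every bit of the flag
 * is set, while MTK_IOMMU_IS_TYPE(pdata, MTK_IOMMU_TYPE_INFRA) compares only the
 * 2-bit type field defined above.
 */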
156 | |
157 | #define MTK_INVALID_LARBID MTK_LARB_NR_MAX |
158 | |
159 | #define MTK_LARB_COM_MAX 8 |
160 | #define MTK_LARB_SUBCOM_MAX 8 |
161 | |
162 | #define MTK_IOMMU_GROUP_MAX 8 |
163 | #define MTK_IOMMU_BANK_MAX 5 |
164 | |
165 | enum mtk_iommu_plat { |
166 | M4U_MT2712, |
167 | M4U_MT6779, |
168 | M4U_MT6795, |
169 | M4U_MT8167, |
170 | M4U_MT8173, |
171 | M4U_MT8183, |
172 | M4U_MT8186, |
173 | M4U_MT8188, |
174 | M4U_MT8192, |
175 | M4U_MT8195, |
176 | M4U_MT8365, |
177 | }; |
178 | |
179 | struct mtk_iommu_iova_region { |
180 | dma_addr_t iova_base; |
181 | unsigned long long size; |
182 | }; |
183 | |
184 | struct mtk_iommu_suspend_reg { |
185 | u32 misc_ctrl; |
186 | u32 dcm_dis; |
187 | u32 ctrl_reg; |
188 | u32 vld_pa_rng; |
189 | u32 wr_len_ctrl; |
190 | |
191 | u32 int_control[MTK_IOMMU_BANK_MAX]; |
192 | u32 int_main_control[MTK_IOMMU_BANK_MAX]; |
193 | u32 ivrp_paddr[MTK_IOMMU_BANK_MAX]; |
194 | }; |
195 | |
196 | struct mtk_iommu_plat_data { |
197 | enum mtk_iommu_plat m4u_plat; |
198 | u32 flags; |
199 | u32 inv_sel_reg; |
200 | |
201 | char *pericfg_comp_str; |
202 | struct list_head *hw_list; |
203 | |
204 | /* |
205 | * The IOMMU HW may support 16GB iova. In order to balance the IOVA ranges, |
206 | * different masters will be put in different iova ranges, for example vcodec |
	 * is in 4G-8G and cam is in 8G-12G. Meanwhile, some masters may have a
	 * special IOVA range requirement, e.g. the CCU can only support the
	 * address range 0x40000000-0x44000000.
	 * Here list the iova ranges this SoC supports and which larbs/ports are in
	 * which region.
	 *
	 * The whole 16GB iova space uses one pgtable, but each region is an iommu group.
214 | */ |
215 | struct { |
216 | unsigned int iova_region_nr; |
217 | const struct mtk_iommu_iova_region *iova_region; |
218 | /* |
		 * Indicate the correspondence between larbs, ports and regions.
220 | * |
221 | * The index is the same as iova_region and larb port numbers are |
222 | * described as bit positions. |
223 | * For example, storing BIT(0) at index 2,1 means "larb 1, port0 is in region 2". |
224 | * [2] = { [1] = BIT(0) } |
225 | */ |
226 | const u32 (*iova_region_larb_msk)[MTK_LARB_NR_MAX]; |
227 | }; |
228 | |
229 | /* |
	 * The IOMMU HW may have 5 banks. Each bank has an independent pgtable.
231 | * Here list how many banks this SoC supports/enables and which ports are in which bank. |
232 | */ |
233 | struct { |
234 | u8 banks_num; |
235 | bool banks_enable[MTK_IOMMU_BANK_MAX]; |
236 | unsigned int banks_portmsk[MTK_IOMMU_BANK_MAX]; |
237 | }; |
238 | |
239 | unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX]; |
240 | }; |
241 | |
242 | struct mtk_iommu_bank_data { |
243 | void __iomem *base; |
244 | int irq; |
245 | u8 id; |
246 | struct device *parent_dev; |
247 | struct mtk_iommu_data *parent_data; |
248 | spinlock_t tlb_lock; /* lock for tlb range flush */ |
249 | struct mtk_iommu_domain *m4u_dom; /* Each bank has a domain */ |
250 | }; |
251 | |
252 | struct mtk_iommu_data { |
253 | struct device *dev; |
254 | struct clk *bclk; |
255 | phys_addr_t protect_base; /* protect memory base */ |
256 | struct mtk_iommu_suspend_reg reg; |
257 | struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX]; |
258 | bool enable_4GB; |
259 | |
260 | struct iommu_device iommu; |
261 | const struct mtk_iommu_plat_data *plat_data; |
262 | struct device *smicomm_dev; |
263 | |
264 | struct mtk_iommu_bank_data *bank; |
265 | struct mtk_iommu_domain *share_dom; |
266 | |
267 | struct regmap *pericfg; |
268 | struct mutex mutex; /* Protect m4u_group/m4u_dom above */ |
269 | |
270 | /* |
	 * In the sharing pgtable case, link data->list into the global list, e.g. m4ulist.
	 * In the non-sharing pgtable case, link data->list into its own hw_list_head.
273 | */ |
274 | struct list_head *hw_list; |
275 | struct list_head hw_list_head; |
276 | struct list_head list; |
277 | struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; |
278 | }; |
279 | |
280 | struct mtk_iommu_domain { |
281 | struct io_pgtable_cfg cfg; |
282 | struct io_pgtable_ops *iop; |
283 | |
284 | struct mtk_iommu_bank_data *bank; |
285 | struct iommu_domain domain; |
286 | |
287 | struct mutex mutex; /* Protect "data" in this structure */ |
288 | }; |
289 | |
290 | static int mtk_iommu_bind(struct device *dev) |
291 | { |
292 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
293 | |
	return component_bind_all(dev, &data->larb_imu);
295 | } |
296 | |
297 | static void mtk_iommu_unbind(struct device *dev) |
298 | { |
299 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
300 | |
	component_unbind_all(dev, &data->larb_imu);
302 | } |
303 | |
304 | static const struct iommu_ops mtk_iommu_ops; |
305 | |
306 | static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid); |
307 | |
308 | #define MTK_IOMMU_TLB_ADDR(iova) ({ \ |
309 | dma_addr_t _addr = iova; \ |
310 | ((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\ |
311 | }) |
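/*
 * For example, a 34-bit iova of 0x2_4000_0000 becomes
 * (0x40000000 & GENMASK(31, 12)) | 0x2 = 0x40000002: bits [31:12] stay in place
 * and bits [34:32] are packed into bits [2:0] of the 32-bit invalidate register.
 */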
312 | |
313 | /* |
314 | * In M4U 4GB mode, the physical address is remapped as below: |
315 | * |
316 | * CPU Physical address: |
317 | * ==================== |
318 | * |
319 | * 0 1G 2G 3G 4G 5G |
320 | * |---A---|---B---|---C---|---D---|---E---| |
321 | * +--I/O--+------------Memory-------------+ |
322 | * |
323 | * IOMMU output physical address: |
324 | * ============================= |
325 | * |
326 | * 4G 5G 6G 7G 8G |
327 | * |---E---|---B---|---C---|---D---| |
328 | * +------------Memory-------------+ |
329 | * |
 * Region 'A' (I/O) can NOT be mapped by the M4U; for regions 'B'/'C'/'D', bit 32
 * of the CPU physical address always needs to be set, and for region 'E' the
 * CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
334 | */ |
335 | #define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL |
336 | |
337 | static LIST_HEAD(m4ulist); /* List all the M4U HWs */ |
338 | |
339 | #define for_each_m4u(data, head) list_for_each_entry(data, head, list) |
340 | |
341 | #define MTK_IOMMU_IOVA_SZ_4G (SZ_4G - SZ_8M) /* 8M as gap */ |
342 | |
343 | static const struct mtk_iommu_iova_region single_domain[] = { |
344 | {.iova_base = 0, .size = MTK_IOMMU_IOVA_SZ_4G}, |
345 | }; |
346 | |
347 | #define MT8192_MULTI_REGION_NR_MAX 6 |
348 | |
349 | #define MT8192_MULTI_REGION_NR (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) ? \ |
350 | MT8192_MULTI_REGION_NR_MAX : 1) |
351 | |
352 | static const struct mtk_iommu_iova_region mt8192_multi_dom[MT8192_MULTI_REGION_NR] = { |
353 | { .iova_base = 0x0, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 0 ~ 4G, */ |
354 | #if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) |
355 | { .iova_base = SZ_4G, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 4G ~ 8G */ |
356 | { .iova_base = SZ_4G * 2, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 8G ~ 12G */ |
357 | { .iova_base = SZ_4G * 3, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 12G ~ 16G */ |
358 | |
359 | { .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */ |
360 | { .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */ |
361 | #endif |
362 | }; |
363 | |
/* If two M4Us share a domain (i.e. use the same hwlist), put the corresponding info in the first data. */
365 | static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist) |
366 | { |
367 | return list_first_entry(hwlist, struct mtk_iommu_data, list); |
368 | } |
369 | |
370 | static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) |
371 | { |
372 | return container_of(dom, struct mtk_iommu_domain, domain); |
373 | } |
374 | |
375 | static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data) |
376 | { |
	/* TLB flush all is always done in bank0. */
378 | struct mtk_iommu_bank_data *bank = &data->bank[0]; |
379 | void __iomem *base = bank->base; |
380 | unsigned long flags; |
381 | |
382 | spin_lock_irqsave(&bank->tlb_lock, flags); |
383 | writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg); |
384 | writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE); |
385 | wmb(); /* Make sure the tlb flush all done */ |
	spin_unlock_irqrestore(&bank->tlb_lock, flags);
387 | } |
388 | |
389 | static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, |
390 | struct mtk_iommu_bank_data *bank) |
391 | { |
392 | struct list_head *head = bank->parent_data->hw_list; |
393 | struct mtk_iommu_bank_data *curbank; |
394 | struct mtk_iommu_data *data; |
395 | bool check_pm_status; |
396 | unsigned long flags; |
397 | void __iomem *base; |
398 | int ret; |
399 | u32 tmp; |
400 | |
401 | for_each_m4u(data, head) { |
		/*
		 * To avoid resuming the iommu device frequently when it is not
		 * active, pm_runtime_get is not always called here; the tlb
		 * flush then relies on the tlb flush all in the runtime resume.
		 *
		 * There are 2 special cases:
		 *
		 * Case1: The iommu dev doesn't have a power domain but has bclk.
		 * This case should also avoid the tlb flush while the dev is not
		 * active, to mute the tlb timeout log, e.g. on mt8173.
		 *
		 * Case2: The power/clock of the infra iommu is always on, and it
		 * doesn't have a device link with the master devices. This case
		 * should skip the PM status check.
		 */
417 | check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO); |
418 | |
419 | if (check_pm_status) { |
			if (pm_runtime_get_if_in_use(data->dev) <= 0)
421 | continue; |
422 | } |
423 | |
424 | curbank = &data->bank[bank->id]; |
425 | base = curbank->base; |
426 | |
427 | spin_lock_irqsave(&curbank->tlb_lock, flags); |
428 | writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, |
429 | base + data->plat_data->inv_sel_reg); |
430 | |
431 | writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A); |
432 | writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1), |
433 | base + REG_MMU_INVLD_END_A); |
434 | writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE); |
435 | |
436 | /* tlb sync */ |
437 | ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE, |
438 | tmp, tmp != 0, 10, 1000); |
439 | |
440 | /* Clear the CPE status */ |
441 | writel_relaxed(0, base + REG_MMU_CPE_DONE); |
		spin_unlock_irqrestore(&curbank->tlb_lock, flags);
443 | |
444 | if (ret) { |
445 | dev_warn(data->dev, |
446 | "Partial TLB flush timed out, falling back to full flush\n" ); |
447 | mtk_iommu_tlb_flush_all(data); |
448 | } |
449 | |
450 | if (check_pm_status) |
			pm_runtime_put(data->dev);
452 | } |
453 | } |
454 | |
455 | static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) |
456 | { |
457 | struct mtk_iommu_bank_data *bank = dev_id; |
458 | struct mtk_iommu_data *data = bank->parent_data; |
459 | struct mtk_iommu_domain *dom = bank->m4u_dom; |
460 | unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0; |
461 | u32 int_state, regval, va34_32, pa34_32; |
462 | const struct mtk_iommu_plat_data *plat_data = data->plat_data; |
463 | void __iomem *base = bank->base; |
464 | u64 fault_iova, fault_pa; |
465 | bool layer, write; |
466 | |
467 | /* Read error info from registers */ |
468 | int_state = readl_relaxed(base + REG_MMU_FAULT_ST1); |
469 | if (int_state & F_REG_MMU0_FAULT_MASK) { |
470 | regval = readl_relaxed(base + REG_MMU0_INT_ID); |
471 | fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA); |
472 | fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA); |
473 | } else { |
474 | regval = readl_relaxed(base + REG_MMU1_INT_ID); |
475 | fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA); |
476 | fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA); |
477 | } |
478 | layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; |
479 | write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; |
480 | if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) { |
481 | va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova); |
482 | fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK; |
483 | fault_iova |= (u64)va34_32 << 32; |
484 | } |
485 | pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova); |
486 | fault_pa |= (u64)pa34_32 << 32; |
487 | |
488 | if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) { |
489 | if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) { |
490 | fault_larb = F_MMU_INT_ID_COMM_ID(regval); |
491 | sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval); |
492 | fault_port = F_MMU_INT_ID_PORT_ID(regval); |
493 | } else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) { |
494 | fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval); |
495 | sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval); |
496 | fault_port = F_MMU_INT_ID_PORT_ID(regval); |
497 | } else if (MTK_IOMMU_HAS_FLAG(plat_data, INT_ID_PORT_WIDTH_6)) { |
498 | fault_port = F_MMU_INT_ID_PORT_ID_WID_6(regval); |
499 | fault_larb = F_MMU_INT_ID_LARB_ID_WID_6(regval); |
500 | } else { |
501 | fault_port = F_MMU_INT_ID_PORT_ID(regval); |
502 | fault_larb = F_MMU_INT_ID_LARB_ID(regval); |
503 | } |
504 | fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm]; |
505 | } |
506 | |
	if (!dom || report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
				       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			bank->parent_dev,
			"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
			int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
			layer, write ? "write" : "read");
514 | } |
515 | |
516 | /* Interrupt clear */ |
517 | regval = readl_relaxed(base + REG_MMU_INT_CONTROL0); |
518 | regval |= F_INT_CLR_BIT; |
519 | writel_relaxed(regval, base + REG_MMU_INT_CONTROL0); |
520 | |
521 | mtk_iommu_tlb_flush_all(data); |
522 | |
523 | return IRQ_HANDLED; |
524 | } |
525 | |
526 | static unsigned int mtk_iommu_get_bank_id(struct device *dev, |
527 | const struct mtk_iommu_plat_data *plat_data) |
528 | { |
529 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
530 | unsigned int i, portmsk = 0, bankid = 0; |
531 | |
532 | if (plat_data->banks_num == 1) |
533 | return bankid; |
534 | |
535 | for (i = 0; i < fwspec->num_ids; i++) |
536 | portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i])); |
537 | |
538 | for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) { |
539 | if (!plat_data->banks_enable[i]) |
540 | continue; |
541 | |
542 | if (portmsk & plat_data->banks_portmsk[i]) { |
543 | bankid = i; |
544 | break; |
545 | } |
546 | } |
547 | return bankid; /* default is 0 */ |
548 | } |
549 | |
550 | static int mtk_iommu_get_iova_region_id(struct device *dev, |
551 | const struct mtk_iommu_plat_data *plat_data) |
552 | { |
553 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
554 | unsigned int portidmsk = 0, larbid; |
555 | const u32 *rgn_larb_msk; |
556 | int i; |
557 | |
558 | if (plat_data->iova_region_nr == 1) |
559 | return 0; |
560 | |
561 | larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); |
562 | for (i = 0; i < fwspec->num_ids; i++) |
563 | portidmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i])); |
564 | |
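	/*
	 * Illustration based on the mt8186 table below: region 2's mask for
	 * larb13 is ~(BIT(9) | BIT(10)), so a larb13 master that avoids ports
	 * 9/10 matches region 2, while one using ports 9/10 matches region 4.
	 */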
565 | for (i = 0; i < plat_data->iova_region_nr; i++) { |
566 | rgn_larb_msk = plat_data->iova_region_larb_msk[i]; |
567 | if (!rgn_larb_msk) |
568 | continue; |
569 | |
570 | if ((rgn_larb_msk[larbid] & portidmsk) == portidmsk) |
571 | return i; |
572 | } |
573 | |
	dev_err(dev, "Can NOT find the region for larb(%d-%x).\n",
575 | larbid, portidmsk); |
576 | return -EINVAL; |
577 | } |
578 | |
579 | static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev, |
580 | bool enable, unsigned int regionid) |
581 | { |
582 | struct mtk_smi_larb_iommu *larb_mmu; |
583 | unsigned int larbid, portid; |
584 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
585 | const struct mtk_iommu_iova_region *region; |
586 | unsigned long portid_msk = 0; |
587 | struct arm_smccc_res res; |
588 | int i, ret = 0; |
589 | |
590 | for (i = 0; i < fwspec->num_ids; ++i) { |
591 | portid = MTK_M4U_TO_PORT(fwspec->ids[i]); |
592 | portid_msk |= BIT(portid); |
593 | } |
594 | |
595 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
		/* All ports should be in the same larb, so just use index 0 here. */
597 | larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); |
598 | larb_mmu = &data->larb_imu[larbid]; |
599 | region = data->plat_data->iova_region + regionid; |
600 | |
601 | for_each_set_bit(portid, &portid_msk, 32) |
602 | larb_mmu->bank[portid] = upper_32_bits(region->iova_base); |
603 | |
		dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
			enable ? "enable" : "disable", dev_name(larb_mmu->dev),
606 | portid_msk, regionid, upper_32_bits(region->iova_base)); |
607 | |
608 | if (enable) |
609 | larb_mmu->mmu |= portid_msk; |
610 | else |
611 | larb_mmu->mmu &= ~portid_msk; |
612 | } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { |
613 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) { |
614 | arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL, |
615 | IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, |
616 | portid_msk, enable, 0, 0, 0, 0, &res); |
617 | ret = res.a0; |
618 | } else { |
619 | /* PCI dev has only one output id, enable the next writing bit for PCIe */ |
620 | if (dev_is_pci(dev)) { |
621 | if (fwspec->num_ids != 1) { |
622 | dev_err(dev, "PCI dev can only have one port.\n" ); |
623 | return -ENODEV; |
624 | } |
625 | portid_msk |= BIT(portid + 1); |
626 | } |
627 | |
			ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
						 (u32)portid_msk, enable ? (u32)portid_msk : 0);
630 | } |
631 | if (ret) |
632 | dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n" , |
633 | enable ? "enable" : "disable" , |
634 | dev_name(data->dev), portid_msk, ret); |
635 | } |
636 | return ret; |
637 | } |
638 | |
639 | static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, |
640 | struct mtk_iommu_data *data, |
641 | unsigned int region_id) |
642 | { |
643 | struct mtk_iommu_domain *share_dom = data->share_dom; |
644 | const struct mtk_iommu_iova_region *region; |
645 | |
646 | /* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */ |
647 | if (share_dom) { |
648 | dom->iop = share_dom->iop; |
649 | dom->cfg = share_dom->cfg; |
650 | dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap; |
651 | goto update_iova_region; |
652 | } |
653 | |
654 | dom->cfg = (struct io_pgtable_cfg) { |
655 | .quirks = IO_PGTABLE_QUIRK_ARM_NS | |
656 | IO_PGTABLE_QUIRK_NO_PERMS | |
657 | IO_PGTABLE_QUIRK_ARM_MTK_EXT, |
658 | .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, |
659 | .ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32, |
660 | .iommu_dev = data->dev, |
661 | }; |
662 | |
663 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) |
664 | dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT; |
665 | |
666 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) |
667 | dom->cfg.oas = data->enable_4GB ? 33 : 32; |
668 | else |
669 | dom->cfg.oas = 35; |
670 | |
	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
674 | return -ENOMEM; |
675 | } |
676 | |
677 | /* Update our support page sizes bitmap */ |
678 | dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; |
679 | |
680 | data->share_dom = dom; |
681 | |
682 | update_iova_region: |
683 | /* Update the iova region for this domain */ |
684 | region = data->plat_data->iova_region + region_id; |
685 | dom->domain.geometry.aperture_start = region->iova_base; |
686 | dom->domain.geometry.aperture_end = region->iova_base + region->size - 1; |
687 | dom->domain.geometry.force_aperture = true; |
688 | return 0; |
689 | } |
690 | |
691 | static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) |
692 | { |
693 | struct mtk_iommu_domain *dom; |
694 | |
695 | if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED) |
696 | return NULL; |
697 | |
	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
699 | if (!dom) |
700 | return NULL; |
701 | mutex_init(&dom->mutex); |
702 | |
703 | return &dom->domain; |
704 | } |
705 | |
706 | static void mtk_iommu_domain_free(struct iommu_domain *domain) |
707 | { |
	kfree(to_mtk_domain(domain));
709 | } |
710 | |
711 | static int mtk_iommu_attach_device(struct iommu_domain *domain, |
712 | struct device *dev) |
713 | { |
714 | struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata; |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
716 | struct list_head *hw_list = data->hw_list; |
717 | struct device *m4udev = data->dev; |
718 | struct mtk_iommu_bank_data *bank; |
719 | unsigned int bankid; |
720 | int ret, region_id; |
721 | |
	region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data);
723 | if (region_id < 0) |
724 | return region_id; |
725 | |
	bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
727 | mutex_lock(&dom->mutex); |
728 | if (!dom->bank) { |
		/* In the sharing pgtable case, the info is in the first data (frstdata). */
		frstdata = mtk_iommu_get_frst_data(hw_list);

		mutex_lock(&frstdata->mutex);
		ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
		mutex_unlock(&frstdata->mutex);
		if (ret) {
			mutex_unlock(&dom->mutex);
			return ret;
		}
		dom->bank = &data->bank[bankid];
	}
	mutex_unlock(&dom->mutex);
742 | |
743 | mutex_lock(&data->mutex); |
744 | bank = &data->bank[bankid]; |
	if (!bank->m4u_dom) { /* Initialize the M4U HW for each bank */
		ret = pm_runtime_resume_and_get(m4udev);
		if (ret < 0) {
			dev_err(m4udev, "pm get fail(%d) in attach.\n", ret);
749 | goto err_unlock; |
750 | } |
751 | |
752 | ret = mtk_iommu_hw_init(data, bankid); |
753 | if (ret) { |
			pm_runtime_put(m4udev);
			goto err_unlock;
		}
		bank->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);

		pm_runtime_put(m4udev);
	}
	mutex_unlock(&data->mutex);
763 | |
764 | if (region_id > 0) { |
765 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34)); |
766 | if (ret) { |
767 | dev_err(m4udev, "Failed to set dma_mask for %s(%d).\n" , dev_name(dev), ret); |
768 | return ret; |
769 | } |
770 | } |
771 | |
	return mtk_iommu_config(data, dev, true, region_id);
773 | |
774 | err_unlock: |
	mutex_unlock(&data->mutex);
776 | return ret; |
777 | } |
778 | |
779 | static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, |
780 | phys_addr_t paddr, size_t pgsize, size_t pgcount, |
781 | int prot, gfp_t gfp, size_t *mapped) |
782 | { |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
784 | |
785 | /* The "4GB mode" M4U physically can not use the lower remap of Dram. */ |
786 | if (dom->bank->parent_data->enable_4GB) |
787 | paddr |= BIT_ULL(32); |
788 | |
789 | /* Synchronize with the tlb_lock */ |
790 | return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped); |
791 | } |
792 | |
793 | static size_t mtk_iommu_unmap(struct iommu_domain *domain, |
794 | unsigned long iova, size_t pgsize, size_t pgcount, |
795 | struct iommu_iotlb_gather *gather) |
796 | { |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount);
800 | return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather); |
801 | } |
802 | |
803 | static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) |
804 | { |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	if (dom->bank)
		mtk_iommu_tlb_flush_all(dom->bank->parent_data);
809 | } |
810 | |
811 | static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, |
812 | struct iommu_iotlb_gather *gather) |
813 | { |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	size_t length = gather->end - gather->start + 1;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
818 | } |
819 | |
820 | static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, |
821 | size_t size) |
822 | { |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
826 | } |
827 | |
828 | static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, |
829 | dma_addr_t iova) |
830 | { |
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
832 | phys_addr_t pa; |
833 | |
834 | pa = dom->iop->iova_to_phys(dom->iop, iova); |
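	/*
	 * E.g. in 4GB mode a pgtable PA of 0x1_8000_0000 (region 'C' in the
	 * remap diagram above) is returned to consumers as CPU PA 0x8000_0000
	 * by clearing bit 32; region 'E' PAs are below the remap base and are
	 * returned unchanged.
	 */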
835 | if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) && |
836 | dom->bank->parent_data->enable_4GB && |
837 | pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) |
838 | pa &= ~BIT_ULL(32); |
839 | |
840 | return pa; |
841 | } |
842 | |
843 | static struct iommu_device *mtk_iommu_probe_device(struct device *dev) |
844 | { |
845 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
846 | struct mtk_iommu_data *data; |
847 | struct device_link *link; |
848 | struct device *larbdev; |
849 | unsigned int larbid, larbidx, i; |
850 | |
851 | if (!fwspec || fwspec->ops != &mtk_iommu_ops) |
		return ERR_PTR(-ENODEV); /* Not an iommu client device */
853 | |
854 | data = dev_iommu_priv_get(dev); |
855 | |
856 | if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) |
857 | return &data->iommu; |
858 | |
859 | /* |
	 * Link the consumer device with the smi-larb device (supplier).
	 * The device that connects to each larb is an independent HW.
	 * All the ports in one device should be in the same larb.
863 | */ |
864 | larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); |
865 | if (larbid >= MTK_LARB_NR_MAX) |
		return ERR_PTR(-EINVAL);
867 | |
868 | for (i = 1; i < fwspec->num_ids; i++) { |
869 | larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]); |
870 | if (larbid != larbidx) { |
871 | dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n" , |
872 | larbid, larbidx); |
873 | return ERR_PTR(error: -EINVAL); |
874 | } |
875 | } |
876 | larbdev = data->larb_imu[larbid].dev; |
877 | if (!larbdev) |
		return ERR_PTR(-EINVAL);
879 | |
	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
884 | return &data->iommu; |
885 | } |
886 | |
887 | static void mtk_iommu_release_device(struct device *dev) |
888 | { |
889 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
890 | struct mtk_iommu_data *data; |
891 | struct device *larbdev; |
892 | unsigned int larbid; |
893 | |
894 | data = dev_iommu_priv_get(dev); |
895 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
896 | larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); |
897 | larbdev = data->larb_imu[larbid].dev; |
		device_link_remove(dev, larbdev);
899 | } |
900 | } |
901 | |
902 | static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data) |
903 | { |
904 | unsigned int bankid; |
905 | |
906 | /* |
	 * If the bank function is enabled, each bank is an iommu group/domain.
	 * Otherwise, each iova region is an iommu group/domain.
909 | */ |
910 | bankid = mtk_iommu_get_bank_id(dev, plat_data); |
911 | if (bankid) |
912 | return bankid; |
913 | |
914 | return mtk_iommu_get_iova_region_id(dev, plat_data); |
915 | } |
916 | |
917 | static struct iommu_group *mtk_iommu_device_group(struct device *dev) |
918 | { |
919 | struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data; |
920 | struct list_head *hw_list = c_data->hw_list; |
921 | struct iommu_group *group; |
922 | int groupid; |
923 | |
	data = mtk_iommu_get_frst_data(hw_list);
	if (!data)
		return ERR_PTR(-ENODEV);

	groupid = mtk_iommu_get_group_id(dev, data->plat_data);
	if (groupid < 0)
		return ERR_PTR(groupid);
931 | |
932 | mutex_lock(&data->mutex); |
933 | group = data->m4u_group[groupid]; |
934 | if (!group) { |
935 | group = iommu_group_alloc(); |
		if (!IS_ERR(group))
937 | data->m4u_group[groupid] = group; |
938 | } else { |
939 | iommu_group_ref_get(group); |
940 | } |
	mutex_unlock(&data->mutex);
942 | return group; |
943 | } |
944 | |
945 | static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) |
946 | { |
947 | struct platform_device *m4updev; |
948 | |
949 | if (args->args_count != 1) { |
950 | dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n" , |
951 | args->args_count); |
952 | return -EINVAL; |
953 | } |
954 | |
955 | if (!dev_iommu_priv_get(dev)) { |
956 | /* Get the m4u device */ |
		m4updev = of_find_device_by_node(args->np);
958 | if (WARN_ON(!m4updev)) |
959 | return -EINVAL; |
960 | |
		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
962 | } |
963 | |
	return iommu_fwspec_add_ids(dev, args->args, 1);
965 | } |
966 | |
967 | static void mtk_iommu_get_resv_regions(struct device *dev, |
968 | struct list_head *head) |
969 | { |
970 | struct mtk_iommu_data *data = dev_iommu_priv_get(dev); |
	unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i;
972 | const struct mtk_iommu_iova_region *resv, *curdom; |
973 | struct iommu_resv_region *region; |
974 | int prot = IOMMU_WRITE | IOMMU_READ; |
975 | |
976 | if ((int)regionid < 0) |
977 | return; |
978 | curdom = data->plat_data->iova_region + regionid; |
979 | for (i = 0; i < data->plat_data->iova_region_nr; i++) { |
980 | resv = data->plat_data->iova_region + i; |
981 | |
982 | /* Only reserve when the region is inside the current domain */ |
983 | if (resv->iova_base <= curdom->iova_base || |
984 | resv->iova_base + resv->size >= curdom->iova_base + curdom->size) |
985 | continue; |
986 | |
		region = iommu_alloc_resv_region(resv->iova_base, resv->size,
						 prot, IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
990 | if (!region) |
991 | return; |
992 | |
		list_add_tail(&region->list, head);
994 | } |
995 | } |
996 | |
997 | static const struct iommu_ops mtk_iommu_ops = { |
998 | .domain_alloc = mtk_iommu_domain_alloc, |
999 | .probe_device = mtk_iommu_probe_device, |
1000 | .release_device = mtk_iommu_release_device, |
1001 | .device_group = mtk_iommu_device_group, |
1002 | .of_xlate = mtk_iommu_of_xlate, |
1003 | .get_resv_regions = mtk_iommu_get_resv_regions, |
1004 | .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, |
1005 | .owner = THIS_MODULE, |
1006 | .default_domain_ops = &(const struct iommu_domain_ops) { |
1007 | .attach_dev = mtk_iommu_attach_device, |
1008 | .map_pages = mtk_iommu_map, |
1009 | .unmap_pages = mtk_iommu_unmap, |
1010 | .flush_iotlb_all = mtk_iommu_flush_iotlb_all, |
1011 | .iotlb_sync = mtk_iommu_iotlb_sync, |
1012 | .iotlb_sync_map = mtk_iommu_sync_map, |
1013 | .iova_to_phys = mtk_iommu_iova_to_phys, |
1014 | .free = mtk_iommu_domain_free, |
1015 | } |
1016 | }; |
1017 | |
1018 | static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid) |
1019 | { |
1020 | const struct mtk_iommu_bank_data *bankx = &data->bank[bankid]; |
1021 | const struct mtk_iommu_bank_data *bank0 = &data->bank[0]; |
1022 | u32 regval; |
1023 | |
1024 | /* |
	 * Global control settings are in bank0. These global registers may be
	 * re-initialized since it is uncertain whether any bank0 consumer exists.
1027 | */ |
1028 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) { |
1029 | regval = F_MMU_PREFETCH_RT_REPLACE_MOD | |
1030 | F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; |
1031 | } else { |
1032 | regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG); |
1033 | regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR; |
1034 | } |
1035 | writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG); |
1036 | |
1037 | if (data->enable_4GB && |
1038 | MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) { |
1039 | /* |
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff; record bits [32:30] here.
1042 | */ |
1043 | regval = F_MMU_VLD_PA_RNG(7, 4); |
1044 | writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG); |
1045 | } |
1046 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE)) |
1047 | writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS); |
1048 | else |
1049 | writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS); |
1050 | |
1051 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) { |
1052 | /* write command throttling mode */ |
1053 | regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL); |
1054 | regval &= ~F_MMU_WR_THROT_DIS_MASK; |
1055 | writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL); |
1056 | } |
1057 | |
1058 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) { |
1059 | /* The register is called STANDARD_AXI_MODE in this case */ |
1060 | regval = 0; |
1061 | } else { |
1062 | regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL); |
1063 | if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE)) |
1064 | regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; |
1065 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN)) |
1066 | regval &= ~F_MMU_IN_ORDER_WR_EN_MASK; |
1067 | } |
1068 | writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL); |
1069 | |
1070 | /* Independent settings for each bank */ |
1071 | regval = F_L2_MULIT_HIT_EN | |
1072 | F_TABLE_WALK_FAULT_INT_EN | |
1073 | F_PREETCH_FIFO_OVERFLOW_INT_EN | |
1074 | F_MISS_FIFO_OVERFLOW_INT_EN | |
1075 | F_PREFETCH_FIFO_ERR_INT_EN | |
1076 | F_MISS_FIFO_ERR_INT_EN; |
1077 | writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0); |
1078 | |
1079 | regval = F_INT_TRANSLATION_FAULT | |
1080 | F_INT_MAIN_MULTI_HIT_FAULT | |
1081 | F_INT_INVALID_PA_FAULT | |
1082 | F_INT_ENTRY_REPLACEMENT_FAULT | |
1083 | F_INT_TLB_MISS_FAULT | |
1084 | F_INT_MISS_TRANSACTION_FIFO_FAULT | |
1085 | F_INT_PRETETCH_TRANSATION_FIFO_FAULT; |
1086 | writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL); |
1087 | |
1088 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR)) |
1089 | regval = (data->protect_base >> 1) | (data->enable_4GB << 31); |
1090 | else |
1091 | regval = lower_32_bits(data->protect_base) | |
1092 | upper_32_bits(data->protect_base); |
1093 | writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR); |
1094 | |
	if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0,
			     dev_name(bankx->parent_dev), (void *)bankx)) {
		writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR);
		dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq);
1099 | return -ENODEV; |
1100 | } |
1101 | |
1102 | return 0; |
1103 | } |
1104 | |
1105 | static const struct component_master_ops mtk_iommu_com_ops = { |
1106 | .bind = mtk_iommu_bind, |
1107 | .unbind = mtk_iommu_unbind, |
1108 | }; |
1109 | |
1110 | static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match, |
1111 | struct mtk_iommu_data *data) |
1112 | { |
1113 | struct device_node *larbnode, *frst_avail_smicomm_node = NULL; |
1114 | struct platform_device *plarbdev, *pcommdev; |
1115 | struct device_link *link; |
1116 | int i, larb_nr, ret; |
1117 | |
	larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
1119 | if (larb_nr < 0) |
1120 | return larb_nr; |
1121 | if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX) |
1122 | return -EINVAL; |
1123 | |
1124 | for (i = 0; i < larb_nr; i++) { |
1125 | struct device_node *smicomm_node, *smi_subcomm_node; |
1126 | u32 id; |
1127 | |
		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
1129 | if (!larbnode) { |
1130 | ret = -EINVAL; |
1131 | goto err_larbdev_put; |
1132 | } |
1133 | |
		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
1136 | continue; |
1137 | } |
1138 | |
		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret)/* The id is consecutive if this property is absent */
			id = i;
		if (id >= MTK_LARB_NR_MAX) {
			of_node_put(larbnode);
1144 | ret = -EINVAL; |
1145 | goto err_larbdev_put; |
1146 | } |
1147 | |
		plarbdev = of_find_device_by_node(larbnode);
		of_node_put(larbnode);
1150 | if (!plarbdev) { |
1151 | ret = -ENODEV; |
1152 | goto err_larbdev_put; |
1153 | } |
1154 | if (data->larb_imu[id].dev) { |
			platform_device_put(plarbdev);
1156 | ret = -EEXIST; |
1157 | goto err_larbdev_put; |
1158 | } |
1159 | data->larb_imu[id].dev = &plarbdev->dev; |
1160 | |
1161 | if (!plarbdev->dev.driver) { |
1162 | ret = -EPROBE_DEFER; |
1163 | goto err_larbdev_put; |
1164 | } |
1165 | |
1166 | /* Get smi-(sub)-common dev from the last larb. */ |
		smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
1168 | if (!smi_subcomm_node) { |
1169 | ret = -EINVAL; |
1170 | goto err_larbdev_put; |
1171 | } |
1172 | |
		/*
		 * There may be two levels of smi-common. The node is an smi-sub-common
		 * if it has its own mediatek,smi property; otherwise it is the smi-common.
1176 | */ |
		smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
		if (smicomm_node)
			of_node_put(smi_subcomm_node);
		else
			smicomm_node = smi_subcomm_node;
1182 | |
1183 | /* |
1184 | * All the larbs that connect to one IOMMU must connect with the same |
1185 | * smi-common. |
1186 | */ |
1187 | if (!frst_avail_smicomm_node) { |
1188 | frst_avail_smicomm_node = smicomm_node; |
1189 | } else if (frst_avail_smicomm_node != smicomm_node) { |
1190 | dev_err(dev, "mediatek,smi property is not right @larb%d." , id); |
1191 | of_node_put(node: smicomm_node); |
1192 | ret = -EINVAL; |
1193 | goto err_larbdev_put; |
1194 | } else { |
			of_node_put(smicomm_node);
1196 | } |
1197 | |
		component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
		platform_device_put(plarbdev);
1200 | } |
1201 | |
1202 | if (!frst_avail_smicomm_node) |
1203 | return -EINVAL; |
1204 | |
	pcommdev = of_find_device_by_node(frst_avail_smicomm_node);
	of_node_put(frst_avail_smicomm_node);
1207 | if (!pcommdev) |
1208 | return -ENODEV; |
1209 | data->smicomm_dev = &pcommdev->dev; |
1210 | |
	link = device_link_add(data->smicomm_dev, dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	platform_device_put(pcommdev);
	if (!link) {
		dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
1216 | return -EINVAL; |
1217 | } |
1218 | return 0; |
1219 | |
1220 | err_larbdev_put: |
1221 | for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) { |
1222 | if (!data->larb_imu[i].dev) |
1223 | continue; |
		put_device(data->larb_imu[i].dev);
1225 | } |
1226 | return ret; |
1227 | } |
1228 | |
1229 | static int mtk_iommu_probe(struct platform_device *pdev) |
1230 | { |
1231 | struct mtk_iommu_data *data; |
1232 | struct device *dev = &pdev->dev; |
1233 | struct resource *res; |
1234 | resource_size_t ioaddr; |
1235 | struct component_match *match = NULL; |
1236 | struct regmap *infracfg; |
1237 | void *protect; |
1238 | int ret, banks_num, i = 0; |
1239 | u32 val; |
1240 | char *p; |
1241 | struct mtk_iommu_bank_data *bank; |
1242 | void __iomem *base; |
1243 | |
	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
1245 | if (!data) |
1246 | return -ENOMEM; |
1247 | data->dev = dev; |
1248 | data->plat_data = of_device_get_match_data(dev); |
1249 | |
	/* Protect memory. HW will access here on a translation fault. */
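	/*
	 * Allocating twice MTK_PROTECT_PA_ALIGN below guarantees that an aligned
	 * MTK_PROTECT_PA_ALIGN-byte window fits inside the buffer wherever
	 * devm_kzalloc() happens to place it.
	 */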
1251 | protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); |
1252 | if (!protect) |
1253 | return -ENOMEM; |
1254 | data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); |
1255 | |
1256 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) { |
		infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg");
		if (IS_ERR(infracfg)) {
1259 | /* |
1260 | * Legacy devicetrees will not specify a phandle to |
1261 | * mediatek,infracfg: in that case, we use the older |
1262 | * way to retrieve a syscon to infra. |
1263 | * |
1264 | * This is for retrocompatibility purposes only, hence |
1265 | * no more compatibles shall be added to this. |
1266 | */ |
1267 | switch (data->plat_data->m4u_plat) { |
1268 | case M4U_MT2712: |
1269 | p = "mediatek,mt2712-infracfg" ; |
1270 | break; |
1271 | case M4U_MT8173: |
1272 | p = "mediatek,mt8173-infracfg" ; |
1273 | break; |
1274 | default: |
1275 | p = NULL; |
1276 | } |
1277 | |
			infracfg = syscon_regmap_lookup_by_compatible(p);
			if (IS_ERR(infracfg))
				return PTR_ERR(infracfg);
1281 | } |
1282 | |
		ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
1284 | if (ret) |
1285 | return ret; |
1286 | data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN); |
1287 | } |
1288 | |
1289 | banks_num = data->plat_data->banks_num; |
1290 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1291 | if (!res) |
1292 | return -EINVAL; |
1293 | if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) { |
1294 | dev_err(dev, "banknr %d. res %pR is not enough.\n" , banks_num, res); |
1295 | return -EINVAL; |
1296 | } |
1297 | base = devm_ioremap_resource(dev, res); |
	if (IS_ERR(base))
		return PTR_ERR(base);
1300 | ioaddr = res->start; |
1301 | |
	data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL);
1303 | if (!data->bank) |
1304 | return -ENOMEM; |
1305 | |
1306 | do { |
1307 | if (!data->plat_data->banks_enable[i]) |
1308 | continue; |
1309 | bank = &data->bank[i]; |
1310 | bank->id = i; |
1311 | bank->base = base + i * MTK_IOMMU_BANK_SZ; |
1312 | bank->m4u_dom = NULL; |
1313 | |
1314 | bank->irq = platform_get_irq(pdev, i); |
1315 | if (bank->irq < 0) |
1316 | return bank->irq; |
1317 | bank->parent_dev = dev; |
1318 | bank->parent_data = data; |
1319 | spin_lock_init(&bank->tlb_lock); |
1320 | } while (++i < banks_num); |
1321 | |
1322 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) { |
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
1326 | } |
1327 | |
1328 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) { |
1329 | ret = dma_set_mask(dev, DMA_BIT_MASK(35)); |
1330 | if (ret) { |
1331 | dev_err(dev, "Failed to set dma_mask 35.\n" ); |
1332 | return ret; |
1333 | } |
1334 | } |
1335 | |
1336 | pm_runtime_enable(dev); |
1337 | |
1338 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
		ret = mtk_iommu_mm_dts_parse(dev, &match, data);
		if (ret) {
			dev_err_probe(dev, ret, "mm dts parse fail\n");
1342 | goto out_runtime_disable; |
1343 | } |
1344 | } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) && |
1345 | !MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) { |
1346 | p = data->plat_data->pericfg_comp_str; |
		data->pericfg = syscon_regmap_lookup_by_compatible(p);
		if (IS_ERR(data->pericfg)) {
			ret = PTR_ERR(data->pericfg);
1350 | goto out_runtime_disable; |
1351 | } |
1352 | } |
1353 | |
1354 | platform_set_drvdata(pdev, data); |
1355 | mutex_init(&data->mutex); |
1356 | |
	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
1359 | if (ret) |
1360 | goto out_link_remove; |
1361 | |
	ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
1363 | if (ret) |
1364 | goto out_sysfs_remove; |
1365 | |
1366 | if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { |
		list_add_tail(&data->list, data->plat_data->hw_list);
		data->hw_list = data->plat_data->hw_list;
	} else {
		INIT_LIST_HEAD(&data->hw_list_head);
		list_add_tail(&data->list, &data->hw_list_head);
1372 | data->hw_list = &data->hw_list_head; |
1373 | } |
1374 | |
1375 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
1376 | ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); |
1377 | if (ret) |
1378 | goto out_list_del; |
1379 | } |
1380 | return ret; |
1381 | |
1382 | out_list_del: |
	list_del(&data->list);
	iommu_device_unregister(&data->iommu);
out_sysfs_remove:
	iommu_device_sysfs_remove(&data->iommu);
out_link_remove:
	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
		device_link_remove(data->smicomm_dev, dev);
1390 | out_runtime_disable: |
1391 | pm_runtime_disable(dev); |
1392 | return ret; |
1393 | } |
1394 | |
1395 | static void mtk_iommu_remove(struct platform_device *pdev) |
1396 | { |
1397 | struct mtk_iommu_data *data = platform_get_drvdata(pdev); |
1398 | struct mtk_iommu_bank_data *bank; |
1399 | int i; |
1400 | |
	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	list_del(&data->list);
1405 | |
1406 | if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { |
		device_link_remove(data->smicomm_dev, &pdev->dev);
1408 | component_master_del(&pdev->dev, &mtk_iommu_com_ops); |
1409 | } |
	pm_runtime_disable(&pdev->dev);
1411 | for (i = 0; i < data->plat_data->banks_num; i++) { |
1412 | bank = &data->bank[i]; |
1413 | if (!bank->m4u_dom) |
1414 | continue; |
		devm_free_irq(&pdev->dev, bank->irq, bank);
1416 | } |
1417 | } |
1418 | |
1419 | static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev) |
1420 | { |
1421 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
1422 | struct mtk_iommu_suspend_reg *reg = &data->reg; |
1423 | void __iomem *base; |
1424 | int i = 0; |
1425 | |
1426 | base = data->bank[i].base; |
1427 | reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL); |
1428 | reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL); |
1429 | reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); |
1430 | reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); |
1431 | reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); |
1432 | do { |
1433 | if (!data->plat_data->banks_enable[i]) |
1434 | continue; |
1435 | base = data->bank[i].base; |
1436 | reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0); |
1437 | reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); |
1438 | reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR); |
1439 | } while (++i < data->plat_data->banks_num); |
	clk_disable_unprepare(data->bclk);
1441 | return 0; |
1442 | } |
1443 | |
1444 | static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev) |
1445 | { |
1446 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
1447 | struct mtk_iommu_suspend_reg *reg = &data->reg; |
1448 | struct mtk_iommu_domain *m4u_dom; |
1449 | void __iomem *base; |
1450 | int ret, i = 0; |
1451 | |
	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
1455 | return ret; |
1456 | } |
1457 | |
1458 | /* |
	 * Upon first resume, only enable the clk and return, since the values of the
1460 | * registers are not yet set. |
1461 | */ |
1462 | if (!reg->wr_len_ctrl) |
1463 | return 0; |
1464 | |
1465 | base = data->bank[i].base; |
1466 | writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL); |
1467 | writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL); |
1468 | writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); |
1469 | writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); |
1470 | writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); |
1471 | do { |
1472 | m4u_dom = data->bank[i].m4u_dom; |
1473 | if (!data->plat_data->banks_enable[i] || !m4u_dom) |
1474 | continue; |
1475 | base = data->bank[i].base; |
1476 | writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0); |
1477 | writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL); |
1478 | writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR); |
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
1480 | } while (++i < data->plat_data->banks_num); |
1481 | |
1482 | /* |
1483 | * Users may allocate dma buffer before they call pm_runtime_get, |
1484 | * in which case it will lack the necessary tlb flush. |
1485 | * Thus, make sure to update the tlb after each PM resume. |
1486 | */ |
1487 | mtk_iommu_tlb_flush_all(data); |
1488 | return 0; |
1489 | } |
1490 | |
1491 | static const struct dev_pm_ops mtk_iommu_pm_ops = { |
1492 | SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL) |
1493 | SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
1494 | pm_runtime_force_resume) |
1495 | }; |
1496 | |
1497 | static const struct mtk_iommu_plat_data mt2712_data = { |
1498 | .m4u_plat = M4U_MT2712, |
1499 | .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE | |
1500 | MTK_IOMMU_TYPE_MM, |
1501 | .hw_list = &m4ulist, |
1502 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
1503 | .iova_region = single_domain, |
1504 | .banks_num = 1, |
1505 | .banks_enable = {true}, |
1506 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1507 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, |
1508 | }; |
1509 | |
1510 | static const struct mtk_iommu_plat_data mt6779_data = { |
1511 | .m4u_plat = M4U_MT6779, |
1512 | .flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN | |
1513 | MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN, |
1514 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1515 | .banks_num = 1, |
1516 | .banks_enable = {true}, |
1517 | .iova_region = single_domain, |
1518 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1519 | .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}}, |
1520 | }; |
1521 | |
1522 | static const struct mtk_iommu_plat_data mt6795_data = { |
1523 | .m4u_plat = M4U_MT6795, |
1524 | .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | |
1525 | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM | |
1526 | TF_PORT_TO_ADDR_MT8173, |
1527 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
1528 | .banks_num = 1, |
1529 | .banks_enable = {true}, |
1530 | .iova_region = single_domain, |
1531 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1532 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */ |
1533 | }; |
1534 | |
1535 | static const struct mtk_iommu_plat_data mt8167_data = { |
1536 | .m4u_plat = M4U_MT8167, |
1537 | .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM, |
1538 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
1539 | .banks_num = 1, |
1540 | .banks_enable = {true}, |
1541 | .iova_region = single_domain, |
1542 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1543 | .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */ |
1544 | }; |
1545 | |
1546 | static const struct mtk_iommu_plat_data mt8173_data = { |
1547 | .m4u_plat = M4U_MT8173, |
1548 | .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | |
1549 | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM | |
1550 | TF_PORT_TO_ADDR_MT8173, |
1551 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
1552 | .banks_num = 1, |
1553 | .banks_enable = {true}, |
1554 | .iova_region = single_domain, |
1555 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1556 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ |
1557 | }; |
1558 | |
1559 | static const struct mtk_iommu_plat_data mt8183_data = { |
1560 | .m4u_plat = M4U_MT8183, |
1561 | .flags = RESET_AXI | MTK_IOMMU_TYPE_MM, |
1562 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
1563 | .banks_num = 1, |
1564 | .banks_enable = {true}, |
1565 | .iova_region = single_domain, |
1566 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1567 | .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}}, |
1568 | }; |
1569 | |
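/*
 * Per-region larb/port masks: each row is one iova region and each entry is
 * the bitmask of ports in that larb belonging to the region (~0 = all ports).
 */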
1570 | static const unsigned int mt8186_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { |
1571 | [0] = {~0, ~0, ~0}, /* Region0: all ports for larb0/1/2 */ |
1572 | [1] = {0, 0, 0, 0, ~0, 0, 0, ~0}, /* Region1: larb4/7 */ |
1573 | [2] = {0, 0, 0, 0, 0, 0, 0, 0, /* Region2: larb8/9/11/13/16/17/19/20 */ |
1574 | ~0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), 0, 0, |
1575 | /* larb13: the other ports except port9/10 */ |
1576 | ~0, ~0, 0, ~0, ~0}, |
1577 | [3] = {0}, |
1578 | [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */ |
1579 | [5] = {[14] = ~0}, /* larb14 */ |
1580 | }; |
1581 | |
1582 | static const struct mtk_iommu_plat_data mt8186_data_mm = { |
1583 | .m4u_plat = M4U_MT8186, |
1584 | .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | |
1585 | WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, |
1586 | .larbid_remap = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20}, |
1587 | {MTK_INVALID_LARBID, 14, 16}, |
1588 | {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}}, |
1589 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1590 | .banks_num = 1, |
1591 | .banks_enable = {true}, |
1592 | .iova_region = mt8192_multi_dom, |
1593 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
1594 | .iova_region_larb_msk = mt8186_larb_region_msk, |
1595 | }; |
1596 | |
1597 | static const struct mtk_iommu_plat_data mt8188_data_infra = { |
1598 | .m4u_plat = M4U_MT8188, |
1599 | .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | |
1600 | MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT | |
1601 | PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF, |
1602 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1603 | .banks_num = 1, |
1604 | .banks_enable = {true}, |
1605 | .iova_region = single_domain, |
1606 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1607 | }; |
1608 | |
1609 | static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { |
1610 | [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */ |
1611 | [1] = {0, 0, 0, 0, 0, 0, 0, 0, |
1612 | 0, 0, 0, 0, 0, 0, 0, 0, |
1613 | 0, 0, 0, 0, 0, ~0, ~0, ~0}, /* Region1: larb19(21)/21(22)/23 */ |
1614 | [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. */ |
1615 | ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, |
1616 | ~0, ~0, ~0, ~0, ~0, 0, 0, 0, |
1617 | 0, ~0}, |
1618 | [3] = {0}, |
1619 | [4] = {[24] = BIT(0) | BIT(1)}, /* Only larb27(24) port0/1 */ |
1620 | [5] = {[24] = BIT(2) | BIT(3)}, /* Only larb27(24) port2/3 */ |
1621 | }; |
1622 | |
1623 | static const struct mtk_iommu_plat_data mt8188_data_vdo = { |
1624 | .m4u_plat = M4U_MT8188, |
1625 | .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | |
1626 | WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | |
1627 | PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM, |
1628 | .hw_list = &m4ulist, |
1629 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1630 | .banks_num = 1, |
1631 | .banks_enable = {true}, |
1632 | .iova_region = mt8192_multi_dom, |
1633 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
1634 | .iova_region_larb_msk = mt8188_larb_region_msk, |
1635 | .larbid_remap = {{2}, {0}, {21}, {0}, {19}, {9, 10, |
1636 | 11 /* 11a */, 25 /* 11c */}, |
1637 | {13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}}, |
1638 | }; |
1639 | |
1640 | static const struct mtk_iommu_plat_data mt8188_data_vpp = { |
1641 | .m4u_plat = M4U_MT8188, |
1642 | .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | |
1643 | WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | |
1644 | PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM, |
1645 | .hw_list = &m4ulist, |
1646 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1647 | .banks_num = 1, |
1648 | .banks_enable = {true}, |
1649 | .iova_region = mt8192_multi_dom, |
1650 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
1651 | .iova_region_larb_msk = mt8188_larb_region_msk, |
1652 | .larbid_remap = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID}, |
1653 | {12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID, |
1654 | 16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID, |
1655 | 27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}}, |
1656 | }; |
1657 | |
1658 | static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { |
1659 | [0] = {~0, ~0}, /* Region0: larb0/1 */ |
1660 | [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */ |
1661 | [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */ |
1662 | 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0, |
1663 | ~0, ~0, ~0, ~0, ~0}, |
1664 | [3] = {0}, |
1665 | [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */ |
1666 | [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */ |
1667 | }; |
1668 | |
1669 | static const struct mtk_iommu_plat_data mt8192_data = { |
1670 | .m4u_plat = M4U_MT8192, |
1671 | .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | |
1672 | WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, |
1673 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1674 | .banks_num = 1, |
1675 | .banks_enable = {true}, |
1676 | .iova_region = mt8192_multi_dom, |
1677 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
1678 | .iova_region_larb_msk = mt8192_larb_region_msk, |
1679 | .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, |
1680 | {0, 14, 16}, {0, 13, 18, 17}}, |
1681 | }; |
1682 | |
1683 | static const struct mtk_iommu_plat_data mt8195_data_infra = { |
1684 | .m4u_plat = M4U_MT8195, |
1685 | .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | |
1686 | MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT, |
	.pericfg_comp_str = "mediatek,mt8195-pericfg_ao",
1688 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1689 | .banks_num = 5, |
1690 | .banks_enable = {true, false, false, false, true}, |
1691 | .banks_portmsk = {[0] = GENMASK(19, 16), /* PCIe */ |
1692 | [4] = GENMASK(31, 20), /* USB */ |
1693 | }, |
1694 | .iova_region = single_domain, |
1695 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1696 | }; |
1697 | |
1698 | static const unsigned int mt8195_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { |
1699 | [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */ |
1700 | [1] = {0, 0, 0, 0, 0, 0, 0, 0, |
1701 | 0, 0, 0, 0, 0, 0, 0, 0, |
1702 | 0, 0, 0, ~0, ~0, ~0, ~0, ~0, /* Region1: larb19/20/21/22/23/24 */ |
1703 | ~0}, |
1704 | [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. */ |
1705 | ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, |
1706 | ~0, ~0, 0, 0, 0, 0, 0, 0, |
1707 | 0, ~0, ~0, ~0, ~0}, |
1708 | [3] = {0}, |
1709 | [4] = {[18] = BIT(0) | BIT(1)}, /* Only larb18 port0/1 */ |
1710 | [5] = {[18] = BIT(2) | BIT(3)}, /* Only larb18 port2/3 */ |
1711 | }; |
1712 | |
1713 | static const struct mtk_iommu_plat_data mt8195_data_vdo = { |
1714 | .m4u_plat = M4U_MT8195, |
1715 | .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | |
1716 | WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, |
1717 | .hw_list = &m4ulist, |
1718 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1719 | .banks_num = 1, |
1720 | .banks_enable = {true}, |
1721 | .iova_region = mt8192_multi_dom, |
1722 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
1723 | .iova_region_larb_msk = mt8195_larb_region_msk, |
1724 | .larbid_remap = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11}, |
1725 | {13, 17, 15/* 17b */, 25}, {5}}, |
1726 | }; |
1727 | |
1728 | static const struct mtk_iommu_plat_data mt8195_data_vpp = { |
1729 | .m4u_plat = M4U_MT8195, |
1730 | .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | |
1731 | WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, |
1732 | .hw_list = &m4ulist, |
1733 | .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
1734 | .banks_num = 1, |
1735 | .banks_enable = {true}, |
1736 | .iova_region = mt8192_multi_dom, |
1737 | .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
1738 | .iova_region_larb_msk = mt8195_larb_region_msk, |
1739 | .larbid_remap = {{1}, {3}, |
1740 | {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23}, |
1741 | {8}, {20}, {12}, |
1742 | /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */ |
1743 | {14, 16, 29, 26, 30, 31, 18}, |
1744 | {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}}, |
1745 | }; |
1746 | |
1747 | static const struct mtk_iommu_plat_data mt8365_data = { |
1748 | .m4u_plat = M4U_MT8365, |
1749 | .flags = RESET_AXI | INT_ID_PORT_WIDTH_6, |
1750 | .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
1751 | .banks_num = 1, |
1752 | .banks_enable = {true}, |
1753 | .iova_region = single_domain, |
1754 | .iova_region_nr = ARRAY_SIZE(single_domain), |
1755 | .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ |
1756 | }; |
1757 | |
1758 | static const struct of_device_id mtk_iommu_of_ids[] = { |
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
	{ .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
	{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{ .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */
	{ .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra},
	{ .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo},
	{ .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp},
	{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
	{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
	{ .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
	{ .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp},
	{ .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
1774 | {} |
1775 | }; |
1776 | |
1777 | static struct platform_driver mtk_iommu_driver = { |
1778 | .probe = mtk_iommu_probe, |
1779 | .remove_new = mtk_iommu_remove, |
1780 | .driver = { |
		.name = "mtk-iommu",
1782 | .of_match_table = mtk_iommu_of_ids, |
1783 | .pm = &mtk_iommu_pm_ops, |
1784 | } |
1785 | }; |
1786 | module_platform_driver(mtk_iommu_driver); |
1787 | |
MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations");
MODULE_LICENSE("GPL v2");
1790 | |