// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/workqueue.h>

#include "imx_rproc.h"
#include "remoteproc_internal.h"

#define IMX7D_SRC_SCR			0x0C
#define IMX7D_ENABLE_M4			BIT(3)
#define IMX7D_SW_M4P_RST		BIT(2)
#define IMX7D_SW_M4C_RST		BIT(1)
#define IMX7D_SW_M4C_NON_SCLR_RST	BIT(0)

#define IMX7D_M4_RST_MASK		(IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
					 | IMX7D_SW_M4C_RST \
					 | IMX7D_SW_M4C_NON_SCLR_RST)

#define IMX7D_M4_START			(IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
					 | IMX7D_SW_M4C_RST)
#define IMX7D_M4_STOP			(IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
					 IMX7D_SW_M4C_NON_SCLR_RST)

#define IMX8M_M7_STOP			(IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST)
#define IMX8M_M7_POLL			IMX7D_ENABLE_M4

#define IMX8M_GPR22			0x58
#define IMX8M_GPR22_CM7_CPUWAIT		BIT(0)

/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR			0x00
#define IMX6SX_ENABLE_M4		BIT(22)
#define IMX6SX_SW_M4P_RST		BIT(12)
#define IMX6SX_SW_M4C_NON_SCLR_RST	BIT(4)
#define IMX6SX_SW_M4C_RST		BIT(3)

#define IMX6SX_M4_START			(IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
					 | IMX6SX_SW_M4C_RST)
#define IMX6SX_M4_STOP			(IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \
					 IMX6SX_SW_M4C_NON_SCLR_RST)
#define IMX6SX_M4_RST_MASK		(IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
					 | IMX6SX_SW_M4C_NON_SCLR_RST \
					 | IMX6SX_SW_M4C_RST)

#define IMX_RPROC_MEM_MAX		32

#define IMX_SIP_RPROC			0xC2000005
#define IMX_SIP_RPROC_START		0x00
#define IMX_SIP_RPROC_STARTED		0x01
#define IMX_SIP_RPROC_STOP		0x02

#define IMX_SC_IRQ_GROUP_REBOOTED	5

/**
 * struct imx_rproc_mem - slim internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @sys_addr: Bus address used to access the memory region
 * @size: Size of the memory region
 */
struct imx_rproc_mem {
	void __iomem *cpu_addr;
	phys_addr_t sys_addr;
	size_t size;
};

/* att flags: lower 16 bits specifying core, higher 16 bits for flags */
/* M4 own area. Can be mapped at probe */
#define ATT_OWN		BIT(31)
#define ATT_IOMEM	BIT(30)

#define ATT_CORE_MASK	0xffff
#define ATT_CORE(I)	BIT((I))

static int imx_rproc_xtr_mbox_init(struct rproc *rproc);
static void imx_rproc_free_mbox(struct rproc *rproc);
static int imx_rproc_detach_pd(struct rproc *rproc);

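/* Per-core driver state, allocated by rproc_alloc() as rproc->priv */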
struct imx_rproc {
	struct device			*dev;
	struct regmap			*regmap;
	struct regmap			*gpr;
	struct rproc			*rproc;
	const struct imx_rproc_dcfg	*dcfg;
	struct imx_rproc_mem		mem[IMX_RPROC_MEM_MAX];
	struct clk			*clk;
	struct mbox_client		cl;
	struct mbox_chan		*tx_ch;
	struct mbox_chan		*rx_ch;
	struct work_struct		rproc_work;
	struct workqueue_struct		*workqueue;
	void __iomem			*rsc_table;
	struct imx_sc_ipc		*ipc_handle;
	struct notifier_block		rproc_nb;
	u32				rproc_pt;	/* partition id */
	u32				rsrc_id;	/* resource id */
	u32				entry;		/* cpu start address */
	int				num_pd;
	u32				core_index;
	struct device			**pd_dev;
	struct device_link		**pd_dev_link;
};

static const struct imx_rproc_att imx_rproc_att_imx93[] = {
	/* dev addr , sys addr  , size	    , flags */
	/* TCM CODE NON-SECURE */
	{ 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
	{ 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },

	/* TCM CODE SECURE */
	{ 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
	{ 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },

	/* TCM SYS NON-SECURE */
	{ 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
	{ 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },

	/* TCM SYS SECURE */
	{ 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
	{ 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },

	/* DDR */
	{ 0x80000000, 0x80000000, 0x10000000, 0 },
	{ 0x90000000, 0x80000000, 0x10000000, 0 },

	{ 0xC0000000, 0xC0000000, 0x10000000, 0 },
	{ 0xD0000000, 0xC0000000, 0x10000000, 0 },
};

static const struct imx_rproc_att imx_rproc_att_imx8qm[] = {
	/* dev addr , sys addr  , size	    , flags */
	{ 0x08000000, 0x08000000, 0x10000000, 0 },
	/* TCML */
	{ 0x1FFE0000, 0x34FE0000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(0)},
	{ 0x1FFE0000, 0x38FE0000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(1)},
	/* TCMU */
	{ 0x20000000, 0x35000000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(0)},
	{ 0x20000000, 0x39000000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(1)},
	/* DDR (Data) */
	{ 0x80000000, 0x80000000, 0x60000000, 0 },
};

static const struct imx_rproc_att imx_rproc_att_imx8qxp[] = {
	{ 0x08000000, 0x08000000, 0x10000000, 0 },
	/* TCML/U */
	{ 0x1FFE0000, 0x34FE0000, 0x00040000, ATT_OWN | ATT_IOMEM },
	/* OCRAM(Low 96KB) */
	{ 0x21000000, 0x00100000, 0x00018000, 0 },
	/* OCRAM */
	{ 0x21100000, 0x00100000, 0x00040000, 0 },
	/* DDR (Data) */
	{ 0x80000000, 0x80000000, 0x60000000, 0 },
};

static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
	/* dev addr , sys addr  , size	    , flags */
	/* ITCM */
	{ 0x00000000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
	/* OCRAM_S */
	{ 0x00180000, 0x00180000, 0x00009000, 0 },
	/* OCRAM */
	{ 0x00900000, 0x00900000, 0x00020000, 0 },
	/* OCRAM */
	{ 0x00920000, 0x00920000, 0x00020000, 0 },
	/* OCRAM */
	{ 0x00940000, 0x00940000, 0x00050000, 0 },
	/* QSPI Code - alias */
	{ 0x08000000, 0x08000000, 0x08000000, 0 },
	/* DDR (Code) - alias */
	{ 0x10000000, 0x40000000, 0x0FFE0000, 0 },
	/* DTCM */
	{ 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM },
	/* OCRAM_S - alias */
	{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
	/* OCRAM */
	{ 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
	/* OCRAM */
	{ 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
	/* OCRAM */
	{ 0x20240000, 0x00940000, 0x00040000, ATT_OWN },
	/* DDR (Data) */
	{ 0x40000000, 0x40000000, 0x80000000, 0 },
};

static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
	/* dev addr , sys addr  , size	    , flags */
	/* TCML - alias */
	{ 0x00000000, 0x007e0000, 0x00020000, ATT_IOMEM },
	/* OCRAM_S */
	{ 0x00180000, 0x00180000, 0x00008000, 0 },
	/* OCRAM */
	{ 0x00900000, 0x00900000, 0x00020000, 0 },
	/* OCRAM */
	{ 0x00920000, 0x00920000, 0x00020000, 0 },
	/* QSPI Code - alias */
	{ 0x08000000, 0x08000000, 0x08000000, 0 },
	/* DDR (Code) - alias */
	{ 0x10000000, 0x80000000, 0x0FFE0000, 0 },
	/* TCML */
	{ 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
	/* TCMU */
	{ 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM },
	/* OCRAM_S */
	{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
	/* OCRAM */
	{ 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
	/* OCRAM */
	{ 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
	/* DDR (Data) */
	{ 0x40000000, 0x40000000, 0x80000000, 0 },
};

static const struct imx_rproc_att imx_rproc_att_imx8ulp[] = {
	{ 0x1FFC0000, 0x1FFC0000, 0xC0000, ATT_OWN },
	{ 0x21000000, 0x21000000, 0x10000, ATT_OWN },
	{ 0x80000000, 0x80000000, 0x60000000, 0 }
};

static const struct imx_rproc_att imx_rproc_att_imx7ulp[] = {
	{ 0x1FFD0000, 0x1FFD0000, 0x30000, ATT_OWN },
	{ 0x20000000, 0x20000000, 0x10000, ATT_OWN },
	{ 0x2F000000, 0x2F000000, 0x20000, ATT_OWN },
	{ 0x2F020000, 0x2F020000, 0x20000, ATT_OWN },
	{ 0x60000000, 0x60000000, 0x40000000, 0 }
};

static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
	/* dev addr , sys addr  , size	    , flags */
	/* OCRAM_S (M4 Boot code) - alias */
	{ 0x00000000, 0x00180000, 0x00008000, 0 },
	/* OCRAM_S (Code) */
	{ 0x00180000, 0x00180000, 0x00008000, ATT_OWN },
	/* OCRAM (Code) - alias */
	{ 0x00900000, 0x00900000, 0x00020000, 0 },
	/* OCRAM_EPDC (Code) - alias */
	{ 0x00920000, 0x00920000, 0x00020000, 0 },
	/* OCRAM_PXP (Code) - alias */
	{ 0x00940000, 0x00940000, 0x00008000, 0 },
	/* TCML (Code) */
	{ 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
	/* DDR (Code) - alias, first part of DDR (Data) */
	{ 0x10000000, 0x80000000, 0x0FFF0000, 0 },

	/* TCMU (Data) */
	{ 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
	/* OCRAM (Data) */
	{ 0x20200000, 0x00900000, 0x00020000, 0 },
	/* OCRAM_EPDC (Data) */
	{ 0x20220000, 0x00920000, 0x00020000, 0 },
	/* OCRAM_PXP (Data) */
	{ 0x20240000, 0x00940000, 0x00008000, 0 },
	/* DDR (Data) */
	{ 0x80000000, 0x80000000, 0x60000000, 0 },
};

static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
	/* dev addr , sys addr  , size	    , flags */
	/* TCML (M4 Boot Code) - alias */
	{ 0x00000000, 0x007F8000, 0x00008000, ATT_IOMEM },
	/* OCRAM_S (Code) */
	{ 0x00180000, 0x008F8000, 0x00004000, 0 },
	/* OCRAM_S (Code) - alias */
	{ 0x00180000, 0x008FC000, 0x00004000, 0 },
	/* TCML (Code) */
	{ 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
	/* DDR (Code) - alias, first part of DDR (Data) */
	{ 0x10000000, 0x80000000, 0x0FFF8000, 0 },

	/* TCMU (Data) */
	{ 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
	/* OCRAM_S (Data) - alias? */
	{ 0x208F8000, 0x008F8000, 0x00004000, 0 },
	/* DDR (Data) */
	{ 0x80000000, 0x80000000, 0x60000000, 0 },
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
	.src_reg	= IMX7D_SRC_SCR,
	.src_mask	= IMX7D_M4_RST_MASK,
	.src_start	= IMX7D_M4_START,
	.src_stop	= IMX8M_M7_STOP,
	.gpr_reg	= IMX8M_GPR22,
	.gpr_wait	= IMX8M_GPR22_CM7_CPUWAIT,
	.att		= imx_rproc_att_imx8mn,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8mn),
	.method		= IMX_RPROC_MMIO,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
	.att		= imx_rproc_att_imx8mn,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8mn),
	.method		= IMX_RPROC_SMC,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
	.src_reg	= IMX7D_SRC_SCR,
	.src_mask	= IMX7D_M4_RST_MASK,
	.src_start	= IMX7D_M4_START,
	.src_stop	= IMX7D_M4_STOP,
	.att		= imx_rproc_att_imx8mq,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8mq),
	.method		= IMX_RPROC_MMIO,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
	.att		= imx_rproc_att_imx8qm,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8qm),
	.method		= IMX_RPROC_SCU_API,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
	.att		= imx_rproc_att_imx8qxp,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8qxp),
	.method		= IMX_RPROC_SCU_API,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
	.att		= imx_rproc_att_imx8ulp,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8ulp),
	.method		= IMX_RPROC_NONE,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
	.att		= imx_rproc_att_imx7ulp,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx7ulp),
	.method		= IMX_RPROC_NONE,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
	.src_reg	= IMX7D_SRC_SCR,
	.src_mask	= IMX7D_M4_RST_MASK,
	.src_start	= IMX7D_M4_START,
	.src_stop	= IMX7D_M4_STOP,
	.att		= imx_rproc_att_imx7d,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx7d),
	.method		= IMX_RPROC_MMIO,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
	.src_reg	= IMX6SX_SRC_SCR,
	.src_mask	= IMX6SX_M4_RST_MASK,
	.src_start	= IMX6SX_M4_START,
	.src_stop	= IMX6SX_M4_STOP,
	.att		= imx_rproc_att_imx6sx,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx6sx),
	.method		= IMX_RPROC_MMIO,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
	.att		= imx_rproc_att_imx93,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx93),
	.method		= IMX_RPROC_SMC,
};

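/*
 * Take the remote core out of reset/wait. The mechanism depends on the SoC:
 * direct SRC/GPR register writes (MMIO), a SiP call into secure firmware
 * (SMC), or the SCU firmware API on i.MX8QM/QXP (SCU_API).
 */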
static int imx_rproc_start(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	struct device *dev = priv->dev;
	struct arm_smccc_res res;
	int ret;

	ret = imx_rproc_xtr_mbox_init(rproc);
	if (ret)
		return ret;

	switch (dcfg->method) {
	case IMX_RPROC_MMIO:
		if (priv->gpr) {
			ret = regmap_clear_bits(priv->gpr, dcfg->gpr_reg,
						dcfg->gpr_wait);
		} else {
			ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
						 dcfg->src_mask,
						 dcfg->src_start);
		}
		break;
	case IMX_RPROC_SMC:
		arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
		ret = res.a0;
		break;
	case IMX_RPROC_SCU_API:
		ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, true, priv->entry);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (ret)
		dev_err(dev, "Failed to enable remote core!\n");

	return ret;
}

static int imx_rproc_stop(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	struct device *dev = priv->dev;
	struct arm_smccc_res res;
	int ret;

	switch (dcfg->method) {
	case IMX_RPROC_MMIO:
		if (priv->gpr) {
			ret = regmap_set_bits(priv->gpr, dcfg->gpr_reg,
					      dcfg->gpr_wait);
			if (ret) {
				dev_err(priv->dev,
					"Failed to quiescence M4 platform!\n");
				return ret;
			}
		}

		ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
					 dcfg->src_stop);
		break;
	case IMX_RPROC_SMC:
		arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
		ret = res.a0;
		if (res.a1)
			dev_info(dev, "Not in wfi, force stopped\n");
		break;
	case IMX_RPROC_SCU_API:
		ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, false, priv->entry);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (ret)
		dev_err(dev, "Failed to stop remote core\n");
	else
		imx_rproc_free_mbox(rproc);

	return ret;
}

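/*
 * Translate a device (remote core) address to a system bus address using the
 * per-SoC address translation table. Entries tagged with ATT_CORE flags are
 * only considered for the matching core index.
 */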
static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
			       size_t len, u64 *sys, bool *is_iomem)
{
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	int i;

	/* parse address translation table */
	for (i = 0; i < dcfg->att_size; i++) {
		const struct imx_rproc_att *att = &dcfg->att[i];

		/*
		 * Ignore entries that do not belong to the current core:
		 * i.MX8QM has two general-purpose M4_[0,1] cores. M4_0's own
		 * entries have "ATT_CORE(0) & BIT(0)" true, M4_1's own entries
		 * have "ATT_CORE(1) & BIT(1)" true.
		 */
		if (att->flags & ATT_CORE_MASK) {
			if (!((BIT(priv->core_index)) & (att->flags & ATT_CORE_MASK)))
				continue;
		}

		if (da >= att->da && da + len < att->da + att->size) {
			unsigned int offset = da - att->da;

			*sys = att->sa + offset;
			if (is_iomem)
				*is_iomem = att->flags & ATT_IOMEM;
			return 0;
		}
	}

	dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n",
		 da, len);
	return -ENOENT;
}

static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct imx_rproc *priv = rproc->priv;
	void *va = NULL;
	u64 sys;
	int i;

	if (len == 0)
		return NULL;

	/*
	 * On the device side we have many aliases, so we need to convert the
	 * device (M4) address to a system bus address first.
	 */
	if (imx_rproc_da_to_sys(priv, da, len, &sys, is_iomem))
		return NULL;

	for (i = 0; i < IMX_RPROC_MEM_MAX; i++) {
		if (sys >= priv->mem[i].sys_addr && sys + len <
		    priv->mem[i].sys_addr + priv->mem[i].size) {
			unsigned int offset = sys - priv->mem[i].sys_addr;
			/* __force to make sparse happy with type conversion */
			va = (__force void *)(priv->mem[i].cpu_addr + offset);
			break;
		}
	}

	dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
		da, len, va);

	return va;
}

static int imx_rproc_mem_alloc(struct rproc *rproc,
			       struct rproc_mem_entry *mem)
{
	struct device *dev = rproc->dev.parent;
	void *va;

	dev_dbg(dev, "map memory: %p+%zx\n", &mem->dma, mem->len);
	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va)) {
		dev_err(dev, "Unable to map memory region: %p+%zx\n",
			&mem->dma, mem->len);
		return -ENOMEM;
	}

	/* Update memory entry va */
	mem->va = va;

	return 0;
}

static int imx_rproc_mem_release(struct rproc *rproc,
				 struct rproc_mem_entry *mem)
{
	dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
	iounmap(mem->va);

	return 0;
}

static int imx_rproc_prepare(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	struct device_node *np = priv->dev->of_node;
	struct of_phandle_iterator it;
	struct rproc_mem_entry *mem;
	struct reserved_mem *rmem;
	u32 da;

	/* Register associated reserved memory regions */
	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
	while (of_phandle_iterator_next(&it) == 0) {
		/*
		 * Ignore the first memory region, which will be used as the
		 * vdev buffer. No extra handling is needed here;
		 * rproc_add_virtio_dev() will take care of it.
		 */
		if (!strcmp(it.node->name, "vdev0buffer"))
			continue;

		if (!strcmp(it.node->name, "rsc-table"))
			continue;

		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			of_node_put(it.node);
			dev_err(priv->dev, "unable to acquire memory-region\n");
			return -EINVAL;
		}

		/* No need to translate pa to da, i.MX uses the same map */
		da = rmem->base;

		/* Register memory region */
		mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
					   imx_rproc_mem_alloc, imx_rproc_mem_release,
					   it.node->name);

		if (mem) {
			rproc_coredump_add_segment(rproc, da, rmem->size);
		} else {
			of_node_put(it.node);
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	return 0;
}

static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	int ret;

	ret = rproc_elf_load_rsc_table(rproc, fw);
	if (ret)
		dev_info(&rproc->dev, "No resource table in elf\n");

	return 0;
}

static void imx_rproc_kick(struct rproc *rproc, int vqid)
{
	struct imx_rproc *priv = rproc->priv;
	int err;
	__u32 mmsg;

	if (!priv->tx_ch) {
		dev_err(priv->dev, "No initialized mbox tx channel\n");
		return;
	}

	/*
	 * Send the index of the triggered virtqueue as the MU payload.
	 * Let the remote processor know which virtqueue is used.
	 */
	mmsg = vqid << 16;

	err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
	if (err < 0)
		dev_err(priv->dev, "%s: failed (%d, err:%d)\n",
			__func__, vqid, err);
}

static int imx_rproc_attach(struct rproc *rproc)
{
	return imx_rproc_xtr_mbox_init(rproc);
}

static int imx_rproc_detach(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;

	if (dcfg->method != IMX_RPROC_SCU_API)
		return -EOPNOTSUPP;

	if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id))
		return -EOPNOTSUPP;

	imx_rproc_free_mbox(rproc);

	return 0;
}

static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
	struct imx_rproc *priv = rproc->priv;

	/* The resource table has already been mapped in imx_rproc_addr_init */
	if (!priv->rsc_table)
		return NULL;

	*table_sz = SZ_1K;
	return (struct resource_table *)priv->rsc_table;
}

static const struct rproc_ops imx_rproc_ops = {
	.prepare	= imx_rproc_prepare,
	.attach		= imx_rproc_attach,
	.detach		= imx_rproc_detach,
	.start		= imx_rproc_start,
	.stop		= imx_rproc_stop,
	.kick		= imx_rproc_kick,
	.da_to_va	= imx_rproc_da_to_va,
	.load		= rproc_elf_load_segments,
	.parse_fw	= imx_rproc_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
	.sanity_check	= rproc_elf_sanity_check,
	.get_boot_addr	= rproc_elf_get_boot_addr,
};

static int imx_rproc_addr_init(struct imx_rproc *priv,
			       struct platform_device *pdev)
{
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int a, b = 0, err, nph;

	/* remap required addresses */
	for (a = 0; a < dcfg->att_size; a++) {
		const struct imx_rproc_att *att = &dcfg->att[a];

		if (!(att->flags & ATT_OWN))
			continue;

		if (b >= IMX_RPROC_MEM_MAX)
			break;

		if (att->flags & ATT_IOMEM)
			priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
							     att->sa, att->size);
		else
			priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev,
								att->sa, att->size);
		if (!priv->mem[b].cpu_addr) {
			dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa);
			return -ENOMEM;
		}
		priv->mem[b].sys_addr = att->sa;
		priv->mem[b].size = att->size;
		b++;
	}

	/* memory-region is an optional property */
	nph = of_count_phandle_with_args(np, "memory-region", NULL);
	if (nph <= 0)
		return 0;

	/* remap optional addresses */
	for (a = 0; a < nph; a++) {
		struct device_node *node;
		struct resource res;

		node = of_parse_phandle(np, "memory-region", a);
		/* Do not map the vdevbuffer or vdevring regions */
		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
			of_node_put(node);
			continue;
		}
		err = of_address_to_resource(node, 0, &res);
		of_node_put(node);
		if (err) {
			dev_err(dev, "unable to resolve memory region\n");
			return err;
		}

		if (b >= IMX_RPROC_MEM_MAX)
			break;

		/* Don't use the resource version, because we might share the region */
		priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
		if (!priv->mem[b].cpu_addr) {
			dev_err(dev, "failed to remap %pr\n", &res);
			return -ENOMEM;
		}
		priv->mem[b].sys_addr = res.start;
		priv->mem[b].size = resource_size(&res);
		if (!strcmp(node->name, "rsc-table"))
			priv->rsc_table = priv->mem[b].cpu_addr;
		b++;
	}

	return 0;
}

static int imx_rproc_notified_idr_cb(int id, void *ptr, void *data)
{
	struct rproc *rproc = data;

	rproc_vq_interrupt(rproc, id);

	return 0;
}

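/*
 * Deferred from the mailbox RX callback: the mailbox payload is not decoded
 * here, so notify every registered virtqueue.
 */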
static void imx_rproc_vq_work(struct work_struct *work)
{
	struct imx_rproc *priv = container_of(work, struct imx_rproc,
					      rproc_work);
	struct rproc *rproc = priv->rproc;

	idr_for_each(&rproc->notifyids, imx_rproc_notified_idr_cb, rproc);
}

static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
{
	struct rproc *rproc = dev_get_drvdata(cl->dev);
	struct imx_rproc *priv = rproc->priv;

	queue_work(priv->workqueue, &priv->rproc_work);
}

static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	struct device *dev = priv->dev;
	struct mbox_client *cl;

	/*
	 * stop() and detach() will free the mbox channels, so the channels
	 * need to be requested again in start() and attach().
	 *
	 * Because start() and attach() are not able to handle mbox deferred
	 * probing, imx_rproc_xtr_mbox_init() is also called in probe().
	 * The check below avoids requesting the mbox channels again when
	 * start() or attach() runs after probe() has succeeded.
	 */
	if (priv->tx_ch && priv->rx_ch)
		return 0;

	if (!of_get_property(dev->of_node, "mbox-names", NULL))
		return 0;

	cl = &priv->cl;
	cl->dev = dev;
	cl->tx_block = true;
	cl->tx_tout = 100;
	cl->knows_txdone = false;
	cl->rx_callback = imx_rproc_rx_callback;

	priv->tx_ch = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(priv->tx_ch))
		return dev_err_probe(cl->dev, PTR_ERR(priv->tx_ch),
				     "failed to request tx mailbox channel\n");

	priv->rx_ch = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(priv->rx_ch)) {
		mbox_free_channel(priv->tx_ch);
		return dev_err_probe(cl->dev, PTR_ERR(priv->rx_ch),
				     "failed to request rx mailbox channel\n");
	}

	return 0;
}

static void imx_rproc_free_mbox(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;

	if (priv->tx_ch) {
		mbox_free_channel(priv->tx_ch);
		priv->tx_ch = NULL;
	}

	if (priv->rx_ch) {
		mbox_free_channel(priv->rx_ch);
		priv->rx_ch = NULL;
	}
}

static void imx_rproc_put_scu(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;

	if (dcfg->method != IMX_RPROC_SCU_API)
		return;

	if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
		imx_rproc_detach_pd(rproc);
		return;
	}

	imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt), false);
	imx_scu_irq_unregister_notifier(&priv->rproc_nb);
}

static int imx_rproc_partition_notify(struct notifier_block *nb,
				      unsigned long event, void *group)
{
	struct imx_rproc *priv = container_of(nb, struct imx_rproc, rproc_nb);

	/* Ignore other irqs */
	if (!((event & BIT(priv->rproc_pt)) && (*(u8 *)group == IMX_SC_IRQ_GROUP_REBOOTED)))
		return 0;

	rproc_report_crash(priv->rproc, RPROC_WATCHDOG);

	pr_info("Partition%d reset!\n", priv->rproc_pt);

	return 0;
}

static int imx_rproc_attach_pd(struct imx_rproc *priv)
{
	struct device *dev = priv->dev;
	int ret, i;

	/*
	 * If there is only one power-domain entry, the platform driver
	 * framework will handle it; there is no need to handle it in this
	 * driver.
	 */
	priv->num_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
						  "#power-domain-cells");
	if (priv->num_pd <= 1)
		return 0;

	priv->pd_dev = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev), GFP_KERNEL);
	if (!priv->pd_dev)
		return -ENOMEM;

	priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev_link),
					       GFP_KERNEL);

	if (!priv->pd_dev_link)
		return -ENOMEM;

	for (i = 0; i < priv->num_pd; i++) {
		priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(priv->pd_dev[i])) {
			ret = PTR_ERR(priv->pd_dev[i]);
			goto detach_pd;
		}

		priv->pd_dev_link[i] = device_link_add(dev, priv->pd_dev[i], DL_FLAG_STATELESS |
						       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!priv->pd_dev_link[i]) {
			dev_pm_domain_detach(priv->pd_dev[i], false);
			ret = -EINVAL;
			goto detach_pd;
		}
	}

	return 0;

detach_pd:
	while (--i >= 0) {
		device_link_del(priv->pd_dev_link[i]);
		dev_pm_domain_detach(priv->pd_dev[i], false);
	}

	return ret;
}

static int imx_rproc_detach_pd(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	int i;

	/*
	 * If there is only one power-domain entry, the platform driver
	 * framework will handle it; there is no need to handle it in this
	 * driver.
	 */
	if (priv->num_pd <= 1)
		return 0;

	for (i = 0; i < priv->num_pd; i++) {
		device_link_del(priv->pd_dev_link[i]);
		dev_pm_domain_detach(priv->pd_dev[i], false);
	}

	return 0;
}

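/*
 * Detect whether the remote core is already running (e.g. started by the
 * bootloader, ROM or SCFW). If so, mark the rproc as detached so Linux
 * attaches to it instead of loading and booting firmware itself. This also
 * acquires the regmap/SCU handles needed to control the core.
 */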
static int imx_rproc_detect_mode(struct imx_rproc *priv)
{
	struct regmap_config config = { .name = "imx-rproc" };
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	struct device *dev = priv->dev;
	struct regmap *regmap;
	struct arm_smccc_res res;
	int ret;
	u32 val;
	u8 pt;

	switch (dcfg->method) {
	case IMX_RPROC_NONE:
		priv->rproc->state = RPROC_DETACHED;
		return 0;
	case IMX_RPROC_SMC:
		arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0)
			priv->rproc->state = RPROC_DETACHED;
		return 0;
	case IMX_RPROC_SCU_API:
		ret = imx_scu_get_handle(&priv->ipc_handle);
		if (ret)
			return ret;
		ret = of_property_read_u32(dev->of_node, "fsl,resource-id", &priv->rsrc_id);
		if (ret) {
			dev_err(dev, "No fsl,resource-id property\n");
			return ret;
		}

		if (priv->rsrc_id == IMX_SC_R_M4_1_PID0)
			priv->core_index = 1;
		else
			priv->core_index = 0;

		/*
		 * If the Mcore resource is not owned by the Acore partition,
		 * it is kicked by the ROM, and Linux can only do IPC with the
		 * Mcore and nothing else.
		 */
		if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
			if (of_property_read_u32(dev->of_node, "fsl,entry-address", &priv->entry))
				return -EINVAL;

			return imx_rproc_attach_pd(priv);
		}

		priv->rproc->state = RPROC_DETACHED;
		priv->rproc->recovery_disabled = false;
		rproc_set_feature(priv->rproc, RPROC_FEAT_ATTACH_ON_RECOVERY);

		/* Get partition id and enable irq in SCFW */
		ret = imx_sc_rm_get_resource_owner(priv->ipc_handle, priv->rsrc_id, &pt);
		if (ret) {
			dev_err(dev, "not able to get resource owner\n");
			return ret;
		}

		priv->rproc_pt = pt;
		priv->rproc_nb.notifier_call = imx_rproc_partition_notify;

		ret = imx_scu_irq_register_notifier(&priv->rproc_nb);
		if (ret) {
			dev_err(dev, "register scu notifier failed, %d\n", ret);
			return ret;
		}

		ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt),
					       true);
		if (ret) {
			imx_scu_irq_unregister_notifier(&priv->rproc_nb);
			dev_err(dev, "Enable irq failed, %d\n", ret);
			return ret;
		}

		return 0;
	default:
		break;
	}

	priv->gpr = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,iomuxc-gpr");
	if (IS_ERR(priv->gpr))
		priv->gpr = NULL;

	regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
	if (IS_ERR(regmap)) {
		dev_err(dev, "failed to find syscon\n");
		return PTR_ERR(regmap);
	}

	priv->regmap = regmap;
	regmap_attach_dev(dev, regmap, &config);

	if (priv->gpr) {
		ret = regmap_read(priv->gpr, dcfg->gpr_reg, &val);
		if (val & dcfg->gpr_wait) {
			/*
			 * After a cold boot, the CM indicates that it is in a
			 * wait state, but it is not fully powered off. Power
			 * it off fully so firmware can be loaded into it.
			 */
			imx_rproc_stop(priv->rproc);
			return 0;
		}
	}

	ret = regmap_read(regmap, dcfg->src_reg, &val);
	if (ret) {
		dev_err(dev, "Failed to read src\n");
		return ret;
	}

	if ((val & dcfg->src_mask) != dcfg->src_stop)
		priv->rproc->state = RPROC_DETACHED;

	return 0;
}

static int imx_rproc_clk_enable(struct imx_rproc *priv)
{
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	struct device *dev = priv->dev;
	int ret;

	/* The remote core is not under the control of Linux */
	if (dcfg->method == IMX_RPROC_NONE)
		return 0;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "Failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	/*
	 * Clock for the M4 block, including its memory. It should be
	 * enabled before .start for the firmware transfer.
	 */
	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int imx_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct imx_rproc *priv;
	struct rproc *rproc;
	const struct imx_rproc_dcfg *dcfg;
	int ret;

	/* set some other name than imx */
	rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
			    NULL, sizeof(*priv));
	if (!rproc)
		return -ENOMEM;

	dcfg = of_device_get_match_data(dev);
	if (!dcfg) {
		ret = -EINVAL;
		goto err_put_rproc;
	}

	priv = rproc->priv;
	priv->rproc = rproc;
	priv->dcfg = dcfg;
	priv->dev = dev;

	dev_set_drvdata(dev, rproc);
	priv->workqueue = create_workqueue(dev_name(dev));
	if (!priv->workqueue) {
		dev_err(dev, "cannot create workqueue\n");
		ret = -ENOMEM;
		goto err_put_rproc;
	}

	ret = imx_rproc_xtr_mbox_init(rproc);
	if (ret)
		goto err_put_wkq;

	ret = imx_rproc_addr_init(priv, pdev);
	if (ret) {
		dev_err(dev, "failed on imx_rproc_addr_init\n");
		goto err_put_mbox;
	}

	ret = imx_rproc_detect_mode(priv);
	if (ret)
		goto err_put_mbox;

	ret = imx_rproc_clk_enable(priv);
	if (ret)
		goto err_put_scu;

	INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);

	if (rproc->state != RPROC_DETACHED)
		rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");

	ret = rproc_add(rproc);
	if (ret) {
		dev_err(dev, "rproc_add failed\n");
		goto err_put_clk;
	}

	return 0;

err_put_clk:
	clk_disable_unprepare(priv->clk);
err_put_scu:
	imx_rproc_put_scu(rproc);
err_put_mbox:
	imx_rproc_free_mbox(rproc);
err_put_wkq:
	destroy_workqueue(priv->workqueue);
err_put_rproc:
	rproc_free(rproc);

	return ret;
}

static void imx_rproc_remove(struct platform_device *pdev)
{
	struct rproc *rproc = platform_get_drvdata(pdev);
	struct imx_rproc *priv = rproc->priv;

	clk_disable_unprepare(priv->clk);
	rproc_del(rproc);
	imx_rproc_put_scu(rproc);
	imx_rproc_free_mbox(rproc);
	destroy_workqueue(priv->workqueue);
	rproc_free(rproc);
}

static const struct of_device_id imx_rproc_of_match[] = {
	{ .compatible = "fsl,imx7ulp-cm4", .data = &imx_rproc_cfg_imx7ulp },
	{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
	{ .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
	{ .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
	{ .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
	{ .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
	{ .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
	{ .compatible = "fsl,imx8mn-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
	{ .compatible = "fsl,imx8mp-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
	{ .compatible = "fsl,imx8qxp-cm4", .data = &imx_rproc_cfg_imx8qxp },
	{ .compatible = "fsl,imx8qm-cm4", .data = &imx_rproc_cfg_imx8qm },
	{ .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
	{ .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 },
	{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);

static struct platform_driver imx_rproc_driver = {
	.probe = imx_rproc_probe,
	.remove_new = imx_rproc_remove,
	.driver = {
		.name = "imx-rproc",
		.of_match_table = imx_rproc_of_match,
	},
};

module_platform_driver(imx_rproc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("i.MX remote processor control driver");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
