1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
2 | /* |
3 | * Copyright 2014-2016 Freescale Semiconductor Inc. |
4 | * Copyright NXP 2016 |
5 | * |
6 | */ |
7 | |
8 | #include <linux/types.h> |
9 | #include <linux/init.h> |
10 | #include <linux/module.h> |
11 | #include <linux/platform_device.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/dma-mapping.h> |
14 | #include <linux/delay.h> |
15 | #include <linux/io.h> |
16 | #include <linux/sys_soc.h> |
17 | |
18 | #include <linux/fsl/mc.h> |
19 | #include <soc/fsl/dpaa2-io.h> |
20 | |
21 | #include "qbman-portal.h" |
22 | #include "dpio.h" |
23 | #include "dpio-cmd.h" |
24 | |
25 | MODULE_LICENSE("Dual BSD/GPL" ); |
26 | MODULE_AUTHOR("Freescale Semiconductor, Inc" ); |
27 | MODULE_DESCRIPTION("DPIO Driver" ); |
28 | |
/* Per-device driver state: the dpaa2_io service object backing this DPIO. */
struct dpio_priv {
	struct dpaa2_io *io;
};
32 | |
/*
 * CPUs that do not yet have a DPIO affined to them.  probe() claims one
 * CPU from this mask per DPIO; remove() returns it.
 */
static cpumask_var_t cpus_unused_mask;
34 | |
/* SoC match tables used to select the cache-stashing cluster geometry. */
static const struct soc_device_attribute ls1088a_soc[] = {
	{.family = "QorIQ LS1088A"},
	{ /* sentinel */ }
};

static const struct soc_device_attribute ls2080a_soc[] = {
	{.family = "QorIQ LS2080A"},
	{ /* sentinel */ }
};

static const struct soc_device_attribute ls2088a_soc[] = {
	{.family = "QorIQ LS2088A"},
	{ /* sentinel */ }
};

static const struct soc_device_attribute lx2160a_soc[] = {
	{.family = "QorIQ LX2160A"},
	{ /* sentinel */ }
};
54 | |
55 | static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu) |
56 | { |
57 | int cluster_base, cluster_size; |
58 | |
59 | if (soc_device_match(matches: ls1088a_soc)) { |
60 | cluster_base = 2; |
61 | cluster_size = 4; |
62 | } else if (soc_device_match(matches: ls2080a_soc) || |
63 | soc_device_match(matches: ls2088a_soc) || |
64 | soc_device_match(matches: lx2160a_soc)) { |
65 | cluster_base = 0; |
66 | cluster_size = 2; |
67 | } else { |
68 | dev_err(&dpio_dev->dev, "unknown SoC version\n" ); |
69 | return -1; |
70 | } |
71 | |
72 | return cluster_base + cpu / cluster_size; |
73 | } |
74 | |
75 | static irqreturn_t dpio_irq_handler(int irq_num, void *arg) |
76 | { |
77 | struct device *dev = (struct device *)arg; |
78 | struct dpio_priv *priv = dev_get_drvdata(dev); |
79 | |
80 | return dpaa2_io_irq(obj: priv->io); |
81 | } |
82 | |
83 | static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev) |
84 | { |
85 | struct fsl_mc_device_irq *irq; |
86 | |
87 | irq = dpio_dev->irqs[0]; |
88 | |
89 | /* clear the affinity hint */ |
90 | irq_set_affinity_hint(irq: irq->virq, NULL); |
91 | } |
92 | |
93 | static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) |
94 | { |
95 | int error; |
96 | struct fsl_mc_device_irq *irq; |
97 | |
98 | irq = dpio_dev->irqs[0]; |
99 | error = devm_request_irq(dev: &dpio_dev->dev, |
100 | irq: irq->virq, |
101 | handler: dpio_irq_handler, |
102 | irqflags: 0, |
103 | devname: dev_name(dev: &dpio_dev->dev), |
104 | dev_id: &dpio_dev->dev); |
105 | if (error < 0) { |
106 | dev_err(&dpio_dev->dev, |
107 | "devm_request_irq() failed: %d\n" , |
108 | error); |
109 | return error; |
110 | } |
111 | |
112 | /* set the affinity hint */ |
113 | if (irq_set_affinity_hint(irq: irq->virq, cpumask_of(cpu))) |
114 | dev_err(&dpio_dev->dev, |
115 | "irq_set_affinity failed irq %d cpu %d\n" , |
116 | irq->virq, cpu); |
117 | |
118 | return 0; |
119 | } |
120 | |
121 | static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) |
122 | { |
123 | struct dpio_attr dpio_attrs; |
124 | struct dpaa2_io_desc desc; |
125 | struct dpio_priv *priv; |
126 | int err = -ENOMEM; |
127 | struct device *dev = &dpio_dev->dev; |
128 | int possible_next_cpu; |
129 | int sdest; |
130 | |
131 | priv = devm_kzalloc(dev, size: sizeof(*priv), GFP_KERNEL); |
132 | if (!priv) |
133 | goto err_priv_alloc; |
134 | |
135 | dev_set_drvdata(dev, data: priv); |
136 | |
137 | err = fsl_mc_portal_allocate(mc_dev: dpio_dev, mc_io_flags: 0, new_mc_io: &dpio_dev->mc_io); |
138 | if (err) { |
139 | dev_dbg(dev, "MC portal allocation failed\n" ); |
140 | err = -EPROBE_DEFER; |
141 | goto err_priv_alloc; |
142 | } |
143 | |
144 | err = dpio_open(mc_io: dpio_dev->mc_io, cmd_flags: 0, dpio_id: dpio_dev->obj_desc.id, |
145 | token: &dpio_dev->mc_handle); |
146 | if (err) { |
147 | dev_err(dev, "dpio_open() failed\n" ); |
148 | goto err_open; |
149 | } |
150 | |
151 | err = dpio_reset(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
152 | if (err) { |
153 | dev_err(dev, "dpio_reset() failed\n" ); |
154 | goto err_reset; |
155 | } |
156 | |
157 | err = dpio_get_attributes(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle, |
158 | attr: &dpio_attrs); |
159 | if (err) { |
160 | dev_err(dev, "dpio_get_attributes() failed %d\n" , err); |
161 | goto err_get_attr; |
162 | } |
163 | desc.qman_version = dpio_attrs.qbman_version; |
164 | desc.qman_clk = dpio_attrs.clk; |
165 | |
166 | err = dpio_enable(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
167 | if (err) { |
168 | dev_err(dev, "dpio_enable() failed %d\n" , err); |
169 | goto err_get_attr; |
170 | } |
171 | |
172 | /* initialize DPIO descriptor */ |
173 | desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; |
174 | desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0; |
175 | desc.dpio_id = dpio_dev->obj_desc.id; |
176 | |
177 | /* get the cpu to use for the affinity hint */ |
178 | possible_next_cpu = cpumask_first(srcp: cpus_unused_mask); |
179 | if (possible_next_cpu >= nr_cpu_ids) { |
180 | dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n" ); |
181 | err = -ERANGE; |
182 | goto err_allocate_irqs; |
183 | } |
184 | desc.cpu = possible_next_cpu; |
185 | cpumask_clear_cpu(cpu: possible_next_cpu, dstp: cpus_unused_mask); |
186 | |
187 | sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, cpu: desc.cpu); |
188 | if (sdest >= 0) { |
189 | err = dpio_set_stashing_destination(mc_io: dpio_dev->mc_io, cmd_flags: 0, |
190 | token: dpio_dev->mc_handle, |
191 | dest: sdest); |
192 | if (err) |
193 | dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n" , |
194 | desc.cpu); |
195 | } |
196 | |
197 | if (dpio_dev->obj_desc.region_count < 3) { |
198 | /* No support for DDR backed portals, use classic mapping */ |
199 | /* |
200 | * Set the CENA regs to be the cache inhibited area of the |
201 | * portal to avoid coherency issues if a user migrates to |
202 | * another core. |
203 | */ |
204 | desc.regs_cena = devm_memremap(dev, offset: dpio_dev->regions[1].start, |
205 | size: resource_size(res: &dpio_dev->regions[1]), |
206 | flags: MEMREMAP_WC); |
207 | } else { |
208 | desc.regs_cena = devm_memremap(dev, offset: dpio_dev->regions[2].start, |
209 | size: resource_size(res: &dpio_dev->regions[2]), |
210 | flags: MEMREMAP_WB); |
211 | } |
212 | |
213 | if (IS_ERR(ptr: desc.regs_cena)) { |
214 | dev_err(dev, "devm_memremap failed\n" ); |
215 | err = PTR_ERR(ptr: desc.regs_cena); |
216 | goto err_allocate_irqs; |
217 | } |
218 | |
219 | desc.regs_cinh = devm_ioremap(dev, offset: dpio_dev->regions[1].start, |
220 | size: resource_size(res: &dpio_dev->regions[1])); |
221 | if (!desc.regs_cinh) { |
222 | err = -ENOMEM; |
223 | dev_err(dev, "devm_ioremap failed\n" ); |
224 | goto err_allocate_irqs; |
225 | } |
226 | |
227 | err = fsl_mc_allocate_irqs(mc_dev: dpio_dev); |
228 | if (err) { |
229 | dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n" , err); |
230 | goto err_allocate_irqs; |
231 | } |
232 | |
233 | priv->io = dpaa2_io_create(desc: &desc, dev); |
234 | if (!priv->io) { |
235 | dev_err(dev, "dpaa2_io_create failed\n" ); |
236 | err = -ENOMEM; |
237 | goto err_dpaa2_io_create; |
238 | } |
239 | |
240 | err = register_dpio_irq_handlers(dpio_dev, cpu: desc.cpu); |
241 | if (err) |
242 | goto err_register_dpio_irq; |
243 | |
244 | dev_info(dev, "probed\n" ); |
245 | dev_dbg(dev, " receives_notifications = %d\n" , |
246 | desc.receives_notifications); |
247 | dpio_close(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
248 | |
249 | return 0; |
250 | |
251 | err_dpaa2_io_create: |
252 | unregister_dpio_irq_handlers(dpio_dev); |
253 | err_register_dpio_irq: |
254 | fsl_mc_free_irqs(mc_dev: dpio_dev); |
255 | err_allocate_irqs: |
256 | dpio_disable(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
257 | err_get_attr: |
258 | err_reset: |
259 | dpio_close(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
260 | err_open: |
261 | fsl_mc_portal_free(mc_io: dpio_dev->mc_io); |
262 | err_priv_alloc: |
263 | return err; |
264 | } |
265 | |
/* Tear down interrupts for a given DPIO object */
static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
{
	unregister_dpio_irq_handlers(dpio_dev);
	fsl_mc_free_irqs(dpio_dev);
}
272 | |
273 | static void dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev) |
274 | { |
275 | struct device *dev; |
276 | struct dpio_priv *priv; |
277 | int err = 0, cpu; |
278 | |
279 | dev = &dpio_dev->dev; |
280 | priv = dev_get_drvdata(dev); |
281 | cpu = dpaa2_io_get_cpu(d: priv->io); |
282 | |
283 | dpaa2_io_down(d: priv->io); |
284 | |
285 | dpio_teardown_irqs(dpio_dev); |
286 | |
287 | cpumask_set_cpu(cpu, dstp: cpus_unused_mask); |
288 | |
289 | err = dpio_open(mc_io: dpio_dev->mc_io, cmd_flags: 0, dpio_id: dpio_dev->obj_desc.id, |
290 | token: &dpio_dev->mc_handle); |
291 | if (err) { |
292 | dev_err(dev, "dpio_open() failed\n" ); |
293 | goto err_open; |
294 | } |
295 | |
296 | dpio_disable(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
297 | |
298 | dpio_close(mc_io: dpio_dev->mc_io, cmd_flags: 0, token: dpio_dev->mc_handle); |
299 | |
300 | err_open: |
301 | fsl_mc_portal_free(mc_io: dpio_dev->mc_io); |
302 | } |
303 | |
/* fsl-mc bus match table: bind to any Freescale "dpio" object. */
static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpio",
	},
	{ .vendor = 0x0 }
};
311 | |
/* Driver registration glue for the fsl-mc bus. */
static struct fsl_mc_driver dpaa2_dpio_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_dpio_probe,
	.remove = dpaa2_dpio_remove,
	.match_id_table = dpaa2_dpio_match_id_table
};
321 | |
322 | static int dpio_driver_init(void) |
323 | { |
324 | if (!zalloc_cpumask_var(mask: &cpus_unused_mask, GFP_KERNEL)) |
325 | return -ENOMEM; |
326 | cpumask_copy(dstp: cpus_unused_mask, cpu_online_mask); |
327 | |
328 | return fsl_mc_driver_register(&dpaa2_dpio_driver); |
329 | } |
330 | |
331 | static void dpio_driver_exit(void) |
332 | { |
333 | free_cpumask_var(mask: cpus_unused_mask); |
334 | fsl_mc_driver_unregister(driver: &dpaa2_dpio_driver); |
335 | } |
336 | module_init(dpio_driver_init); |
337 | module_exit(dpio_driver_exit); |
338 | |