1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Qualcomm Technologies HIDMA DMA engine Management interface |
4 | * |
5 | * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. |
6 | */ |
7 | |
8 | #include <linux/dmaengine.h> |
9 | #include <linux/acpi.h> |
10 | #include <linux/of.h> |
11 | #include <linux/property.h> |
12 | #include <linux/of_address.h> |
13 | #include <linux/of_irq.h> |
14 | #include <linux/of_platform.h> |
15 | #include <linux/of_device.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/module.h> |
18 | #include <linux/uaccess.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/pm_runtime.h> |
21 | #include <linux/bitops.h> |
22 | #include <linux/dma-mapping.h> |
23 | |
24 | #include "hidma_mgmt.h" |
25 | |
/* Register offsets within the management MMIO region */
#define HIDMA_QOS_N_OFFSET		0x700	/* per-channel QoS, 4 bytes apart */
#define HIDMA_CFG_OFFSET		0x400	/* engine enable/config */
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C	/* rd burst low 16, wr burst high 16 */
#define HIDMA_MAX_XACTIONS_OFFSET	0x420	/* rd xactions low, wr xactions high */
#define HIDMA_HW_VERSION_OFFSET		0x424	/* major [31:28], minor [19:16] */
#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418	/* channel reset timeout, cycles */

/* Field masks for the registers above */
#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)

/* Bit positions of the write-side fields and the per-channel QoS fields */
#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
#define HIDMA_WRR_BIT_POS		8
#define HIDMA_PRIORITY_BIT_POS		15

#define HIDMA_AUTOSUSPEND_TIMEOUT	2000	/* runtime-PM autosuspend, ms */
#define HIDMA_MAX_CHANNEL_WEIGHT	15
46 | |
/*
 * Module parameters: when set non-zero at load time they override the
 * corresponding ACPI/DT value in hidma_mgmt_probe(); otherwise they are
 * written back so they report the value actually in use.
 */
static unsigned int max_write_request;
module_param(max_write_request, uint, 0644);
MODULE_PARM_DESC(max_write_request,
	"maximum write burst (default: ACPI/DT value)");

static unsigned int max_read_request;
module_param(max_read_request, uint, 0644);
MODULE_PARM_DESC(max_read_request,
	"maximum read burst (default: ACPI/DT value)");

static unsigned int max_wr_xactions;
module_param(max_wr_xactions, uint, 0644);
MODULE_PARM_DESC(max_wr_xactions,
	"maximum number of write transactions (default: ACPI/DT value)");

static unsigned int max_rd_xactions;
module_param(max_rd_xactions, uint, 0644);
MODULE_PARM_DESC(max_rd_xactions,
	"maximum number of read transactions (default: ACPI/DT value)");
66 | |
67 | int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev) |
68 | { |
69 | unsigned int i; |
70 | u32 val; |
71 | |
72 | if (!is_power_of_2(n: mgmtdev->max_write_request) || |
73 | (mgmtdev->max_write_request < 128) || |
74 | (mgmtdev->max_write_request > 1024)) { |
75 | dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n" , |
76 | mgmtdev->max_write_request); |
77 | return -EINVAL; |
78 | } |
79 | |
80 | if (!is_power_of_2(n: mgmtdev->max_read_request) || |
81 | (mgmtdev->max_read_request < 128) || |
82 | (mgmtdev->max_read_request > 1024)) { |
83 | dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n" , |
84 | mgmtdev->max_read_request); |
85 | return -EINVAL; |
86 | } |
87 | |
88 | if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) { |
89 | dev_err(&mgmtdev->pdev->dev, |
90 | "max_wr_xactions cannot be bigger than %ld\n" , |
91 | HIDMA_MAX_WR_XACTIONS_MASK); |
92 | return -EINVAL; |
93 | } |
94 | |
95 | if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) { |
96 | dev_err(&mgmtdev->pdev->dev, |
97 | "max_rd_xactions cannot be bigger than %ld\n" , |
98 | HIDMA_MAX_RD_XACTIONS_MASK); |
99 | return -EINVAL; |
100 | } |
101 | |
102 | for (i = 0; i < mgmtdev->dma_channels; i++) { |
103 | if (mgmtdev->priority[i] > 1) { |
104 | dev_err(&mgmtdev->pdev->dev, |
105 | "priority can be 0 or 1\n" ); |
106 | return -EINVAL; |
107 | } |
108 | |
109 | if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) { |
110 | dev_err(&mgmtdev->pdev->dev, |
111 | "max value of weight can be %d.\n" , |
112 | HIDMA_MAX_CHANNEL_WEIGHT); |
113 | return -EINVAL; |
114 | } |
115 | |
116 | /* weight needs to be at least one */ |
117 | if (mgmtdev->weight[i] == 0) |
118 | mgmtdev->weight[i] = 1; |
119 | } |
120 | |
121 | pm_runtime_get_sync(dev: &mgmtdev->pdev->dev); |
122 | val = readl(addr: mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); |
123 | val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS); |
124 | val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS; |
125 | val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK; |
126 | val |= mgmtdev->max_read_request; |
127 | writel(val, addr: mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); |
128 | |
129 | val = readl(addr: mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); |
130 | val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS); |
131 | val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS; |
132 | val &= ~HIDMA_MAX_RD_XACTIONS_MASK; |
133 | val |= mgmtdev->max_rd_xactions; |
134 | writel(val, addr: mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); |
135 | |
136 | mgmtdev->hw_version = |
137 | readl(addr: mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET); |
138 | mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF; |
139 | mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF; |
140 | |
141 | for (i = 0; i < mgmtdev->dma_channels; i++) { |
142 | u32 weight = mgmtdev->weight[i]; |
143 | u32 priority = mgmtdev->priority[i]; |
144 | |
145 | val = readl(addr: mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); |
146 | val &= ~(1 << HIDMA_PRIORITY_BIT_POS); |
147 | val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS; |
148 | val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS); |
149 | val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS; |
150 | writel(val, addr: mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); |
151 | } |
152 | |
153 | val = readl(addr: mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); |
154 | val &= ~HIDMA_CHRESET_TIMEOUT_MASK; |
155 | val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK; |
156 | writel(val, addr: mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); |
157 | |
158 | pm_runtime_mark_last_busy(dev: &mgmtdev->pdev->dev); |
159 | pm_runtime_put_autosuspend(dev: &mgmtdev->pdev->dev); |
160 | return 0; |
161 | } |
162 | EXPORT_SYMBOL_GPL(hidma_mgmt_setup); |
163 | |
/*
 * hidma_mgmt_probe() - map the management registers, pull the engine
 * configuration from ACPI/DT (optionally overridden by module parameters),
 * program the hardware via hidma_mgmt_setup() and start the engine.
 *
 * Runtime PM is enabled with autosuspend and the device is held active
 * across all register accesses; on error it is force-suspended and
 * runtime PM is disabled again.
 */
static int hidma_mgmt_probe(struct platform_device *pdev)
{
	struct hidma_mgmt_dev *mgmtdev;
	struct resource *res;
	void __iomem *virtaddr;
	int irq;
	int rc;
	u32 val;

	/* Power up before touching any register below. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	virtaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(virtaddr)) {
		rc = PTR_ERR(virtaddr);
		goto out;
	}

	/* Only validated here; this driver does not request the IRQ itself. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		rc = irq;
		goto out;
	}

	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
	if (!mgmtdev) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->pdev = pdev;
	mgmtdev->addrsize = resource_size(res);
	mgmtdev->virtaddr = virtaddr;

	/* All of the following firmware properties are mandatory. */
	rc = device_property_read_u32(&pdev->dev, "dma-channels",
				      &mgmtdev->dma_channels);
	if (rc) {
		dev_err(&pdev->dev, "number of channels missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev,
				      "channel-reset-timeout-cycles",
				      &mgmtdev->chreset_timeout_cycles);
	if (rc) {
		dev_err(&pdev->dev, "channel reset timeout missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
				      &mgmtdev->max_write_request);
	if (rc) {
		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
		goto out;
	}

	/*
	 * For each tunable: a non-zero module parameter overrides the
	 * firmware value; otherwise the parameter is written back so that
	 * it reports the value actually in use.
	 */
	if (max_write_request &&
	    (max_write_request != mgmtdev->max_write_request)) {
		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
			max_write_request);
		mgmtdev->max_write_request = max_write_request;
	} else
		max_write_request = mgmtdev->max_write_request;

	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
				      &mgmtdev->max_read_request);
	if (rc) {
		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
		goto out;
	}
	if (max_read_request &&
	    (max_read_request != mgmtdev->max_read_request)) {
		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
			max_read_request);
		mgmtdev->max_read_request = max_read_request;
	} else
		max_read_request = mgmtdev->max_read_request;

	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
				      &mgmtdev->max_wr_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-write-transactions missing\n");
		goto out;
	}
	if (max_wr_xactions &&
	    (max_wr_xactions != mgmtdev->max_wr_xactions)) {
		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
			max_wr_xactions);
		mgmtdev->max_wr_xactions = max_wr_xactions;
	} else
		max_wr_xactions = mgmtdev->max_wr_xactions;

	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
				      &mgmtdev->max_rd_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-read-transactions missing\n");
		goto out;
	}
	if (max_rd_xactions &&
	    (max_rd_xactions != mgmtdev->max_rd_xactions)) {
		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
			max_rd_xactions);
		mgmtdev->max_rd_xactions = max_rd_xactions;
	} else
		max_rd_xactions = mgmtdev->max_rd_xactions;

	/* Per-channel QoS arrays; zeroed, so defaults are priority 0. */
	mgmtdev->priority = devm_kcalloc(&pdev->dev,
					 mgmtdev->dma_channels,
					 sizeof(*mgmtdev->priority),
					 GFP_KERNEL);
	if (!mgmtdev->priority) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->weight = devm_kcalloc(&pdev->dev,
				       mgmtdev->dma_channels,
				       sizeof(*mgmtdev->weight), GFP_KERNEL);
	if (!mgmtdev->weight) {
		rc = -ENOMEM;
		goto out;
	}

	/* Validate and program the collected configuration. */
	rc = hidma_mgmt_setup(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "setup failed\n");
		goto out;
	}

	/* start the HW */
	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
	val |= 1;
	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);

	rc = hidma_mgmt_init_sys(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "sysfs setup failed\n");
		goto out;
	}

	dev_info(&pdev->dev,
		 "HW rev: %d.%d @ %pa with %d physical channels\n",
		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
		 &res->start, mgmtdev->dma_channels);

	platform_set_drvdata(pdev, mgmtdev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
out:
	/* Undo the runtime-PM enabling done at the top of the function. */
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
321 | |
#if IS_ENABLED(CONFIG_ACPI)
/* ACPI HID of the HIDMA management device */
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif

/* Device-tree compatible string; also used by hidma_mgmt_init() below */
static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);

static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};
344 | |
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
static int object_counter;

/*
 * hidma_mgmt_of_populate_channels() - create a platform device per
 * DT-described channel child of the management node @np.
 *
 * Each child contributes two memory regions and one interrupt, which are
 * copied into a temporary resource array and handed to
 * platform_device_register_full().
 *
 * NOTE(review): devices registered in earlier iterations are not torn
 * down when a later child fails, and the reference taken by
 * of_find_device_by_node() is never dropped; see the comment in
 * hidma_mgmt_init() for why no cleanup is attempted.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
	struct platform_device *pdev_parent = of_find_device_by_node(np);
	struct platform_device_info pdevinfo;
	struct device_node *child;
	struct resource *res;
	int ret = 0;

	/* allocate a resource array: two memory regions plus one IRQ */
	res = kcalloc(3, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		struct platform_device *new_pdev;

		/* of_address_to_resource() returns 0 on success. */
		ret = of_address_to_resource(child, 0, &res[0]);
		if (ret)
			goto out;

		ret = of_address_to_resource(child, 1, &res[1]);
		if (ret)
			goto out;

		ret = of_irq_to_resource(child, 0, &res[2]);
		if (ret <= 0) {
			/* 0 means "no IRQ"; turn it into a real errno. */
			if (!ret)
				ret = -ENODEV;
			goto out;
		}

		memset(&pdevinfo, 0, sizeof(pdevinfo));
		pdevinfo.fwnode = &child->fwnode;
		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
		pdevinfo.name = child->name;
		pdevinfo.id = object_counter++;
		pdevinfo.res = res;
		pdevinfo.num_res = 3;
		pdevinfo.data = NULL;
		pdevinfo.size_data = 0;
		pdevinfo.dma_mask = DMA_BIT_MASK(64);
		new_pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(new_pdev)) {
			ret = PTR_ERR(new_pdev);
			goto out;
		}
		new_pdev->dev.of_node = child;
		of_dma_configure(&new_pdev->dev, child, true);
		/*
		 * It is assumed that calling of_msi_configure is safe on
		 * platforms with or without MSI support.
		 */
		of_msi_configure(&new_pdev->dev, child);
	}

	kfree(res);

	/* Don't leak the positive IRQ number of the last child. */
	return 0;

out:
	of_node_put(child);
	kfree(res);

	return ret;
}
#endif
411 | |
412 | static int __init hidma_mgmt_init(void) |
413 | { |
414 | #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) |
415 | struct device_node *child; |
416 | |
417 | for_each_matching_node(child, hidma_mgmt_match) { |
418 | /* device tree based firmware here */ |
419 | hidma_mgmt_of_populate_channels(np: child); |
420 | } |
421 | #endif |
422 | /* |
423 | * We do not check for return value here, as it is assumed that |
424 | * platform_driver_register must not fail. The reason for this is that |
425 | * the (potential) hidma_mgmt_of_populate_channels calls above are not |
426 | * cleaned up if it does fail, and to do this work is quite |
427 | * complicated. In particular, various calls of of_address_to_resource, |
428 | * of_irq_to_resource, platform_device_register_full, of_dma_configure, |
429 | * and of_msi_configure which then call other functions and so on, must |
430 | * be cleaned up - this is not a trivial exercise. |
431 | * |
432 | * Currently, this module is not intended to be unloaded, and there is |
433 | * no module_exit function defined which does the needed cleanup. For |
434 | * this reason, we have to assume success here. |
435 | */ |
436 | platform_driver_register(&hidma_mgmt_driver); |
437 | |
438 | return 0; |
439 | } |
440 | module_init(hidma_mgmt_init); |
441 | MODULE_LICENSE("GPL v2" ); |
442 | |