1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Common CPM code |
4 | * |
5 | * Author: Scott Wood <scottwood@freescale.com> |
6 | * |
7 | * Copyright 2007-2008,2010 Freescale Semiconductor, Inc. |
8 | * |
9 | * Some parts derived from commproc.c/cpm2_common.c, which is: |
10 | * Copyright (c) 1997 Dan error_act (dmalek@jlc.net) |
11 | * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com> |
12 | * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) |
13 | * 2006 (c) MontaVista Software, Inc. |
14 | * Vitaly Bordug <vbordug@ru.mvista.com> |
15 | */ |
16 | #include <linux/genalloc.h> |
17 | #include <linux/init.h> |
18 | #include <linux/list.h> |
19 | #include <linux/spinlock.h> |
20 | #include <linux/export.h> |
21 | #include <linux/of.h> |
22 | #include <linux/of_address.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/io.h> |
25 | #include <soc/fsl/qe/qe.h> |
26 | |
/* genalloc pool covering the muram region(s); offsets are biased by GENPOOL_OFFSET */
static struct gen_pool *muram_pool;
/* guards muram_pool allocations and muram_block_list */
static DEFINE_SPINLOCK(cpm_muram_lock);
/* virtual mapping of the muram region, set up by cpm_muram_init() */
static void __iomem *muram_vbase;
/* physical base of the muram region; nonzero means init already ran */
static phys_addr_t muram_pbase;

/* bookkeeping for one allocation, so cpm_muram_free() can recover its size */
struct muram_block {
	struct list_head head;	/* entry in muram_block_list */
	s32 start;		/* muram offset returned to the caller */
	int size;		/* allocation size in bytes */
};

static LIST_HEAD(muram_block_list);

/* max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
/*
 * gen_pool_alloc() returns 0 on failure, so pool addresses are biased by
 * this offset to keep a legitimate allocation at muram offset 0 distinct
 * from the failure return.
 */
#define GENPOOL_OFFSET (4096 * 8)
43 | |
44 | int cpm_muram_init(void) |
45 | { |
46 | struct device_node *np; |
47 | struct resource r; |
48 | __be32 zero[OF_MAX_ADDR_CELLS] = {}; |
49 | resource_size_t max = 0; |
50 | int i = 0; |
51 | int ret = 0; |
52 | |
53 | if (muram_pbase) |
54 | return 0; |
55 | |
56 | np = of_find_compatible_node(NULL, NULL, compat: "fsl,cpm-muram-data" ); |
57 | if (!np) { |
58 | /* try legacy bindings */ |
59 | np = of_find_node_by_name(NULL, name: "data-only" ); |
60 | if (!np) { |
61 | pr_err("Cannot find CPM muram data node" ); |
62 | ret = -ENODEV; |
63 | goto out_muram; |
64 | } |
65 | } |
66 | |
67 | muram_pool = gen_pool_create(0, -1); |
68 | if (!muram_pool) { |
69 | pr_err("Cannot allocate memory pool for CPM/QE muram" ); |
70 | ret = -ENOMEM; |
71 | goto out_muram; |
72 | } |
73 | muram_pbase = of_translate_address(np, addr: zero); |
74 | if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { |
75 | pr_err("Cannot translate zero through CPM muram node" ); |
76 | ret = -ENODEV; |
77 | goto out_pool; |
78 | } |
79 | |
80 | while (of_address_to_resource(dev: np, index: i++, r: &r) == 0) { |
81 | if (r.end > max) |
82 | max = r.end; |
83 | ret = gen_pool_add(pool: muram_pool, addr: r.start - muram_pbase + |
84 | GENPOOL_OFFSET, size: resource_size(res: &r), nid: -1); |
85 | if (ret) { |
86 | pr_err("QE: couldn't add muram to pool!\n" ); |
87 | goto out_pool; |
88 | } |
89 | } |
90 | |
91 | muram_vbase = ioremap(offset: muram_pbase, size: max - muram_pbase + 1); |
92 | if (!muram_vbase) { |
93 | pr_err("Cannot map QE muram" ); |
94 | ret = -ENOMEM; |
95 | goto out_pool; |
96 | } |
97 | goto out_muram; |
98 | out_pool: |
99 | gen_pool_destroy(muram_pool); |
100 | out_muram: |
101 | of_node_put(node: np); |
102 | return ret; |
103 | } |
104 | |
105 | /* |
106 | * cpm_muram_alloc_common - cpm_muram_alloc common code |
107 | * @size: number of bytes to allocate |
108 | * @algo: algorithm for alloc. |
109 | * @data: data for genalloc's algorithm. |
110 | * |
111 | * This function returns a non-negative offset into the muram area, or |
112 | * a negative errno on failure. |
113 | */ |
114 | static s32 cpm_muram_alloc_common(unsigned long size, |
115 | genpool_algo_t algo, void *data) |
116 | { |
117 | struct muram_block *entry; |
118 | s32 start; |
119 | |
120 | entry = kmalloc(size: sizeof(*entry), GFP_ATOMIC); |
121 | if (!entry) |
122 | return -ENOMEM; |
123 | start = gen_pool_alloc_algo(pool: muram_pool, size, algo, data); |
124 | if (!start) { |
125 | kfree(objp: entry); |
126 | return -ENOMEM; |
127 | } |
128 | start = start - GENPOOL_OFFSET; |
129 | memset_io(cpm_muram_addr(offset: start), 0, size); |
130 | entry->start = start; |
131 | entry->size = size; |
132 | list_add(new: &entry->head, head: &muram_block_list); |
133 | |
134 | return start; |
135 | } |
136 | |
137 | /* |
138 | * cpm_muram_alloc - allocate the requested size worth of multi-user ram |
139 | * @size: number of bytes to allocate |
140 | * @align: requested alignment, in bytes |
141 | * |
142 | * This function returns a non-negative offset into the muram area, or |
143 | * a negative errno on failure. |
144 | * Use cpm_muram_addr() to get the virtual address of the area. |
145 | * Use cpm_muram_free() to free the allocation. |
146 | */ |
147 | s32 cpm_muram_alloc(unsigned long size, unsigned long align) |
148 | { |
149 | s32 start; |
150 | unsigned long flags; |
151 | struct genpool_data_align muram_pool_data; |
152 | |
153 | spin_lock_irqsave(&cpm_muram_lock, flags); |
154 | muram_pool_data.align = align; |
155 | start = cpm_muram_alloc_common(size, algo: gen_pool_first_fit_align, |
156 | data: &muram_pool_data); |
157 | spin_unlock_irqrestore(lock: &cpm_muram_lock, flags); |
158 | return start; |
159 | } |
160 | EXPORT_SYMBOL(cpm_muram_alloc); |
161 | |
162 | /** |
163 | * cpm_muram_free - free a chunk of multi-user ram |
164 | * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). |
165 | */ |
166 | void cpm_muram_free(s32 offset) |
167 | { |
168 | unsigned long flags; |
169 | int size; |
170 | struct muram_block *tmp; |
171 | |
172 | if (offset < 0) |
173 | return; |
174 | |
175 | size = 0; |
176 | spin_lock_irqsave(&cpm_muram_lock, flags); |
177 | list_for_each_entry(tmp, &muram_block_list, head) { |
178 | if (tmp->start == offset) { |
179 | size = tmp->size; |
180 | list_del(entry: &tmp->head); |
181 | kfree(objp: tmp); |
182 | break; |
183 | } |
184 | } |
185 | gen_pool_free(pool: muram_pool, addr: offset + GENPOOL_OFFSET, size); |
186 | spin_unlock_irqrestore(lock: &cpm_muram_lock, flags); |
187 | } |
188 | EXPORT_SYMBOL(cpm_muram_free); |
189 | |
190 | /* |
191 | * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram |
192 | * @offset: offset of allocation start address |
193 | * @size: number of bytes to allocate |
194 | * This function returns @offset if the area was available, a negative |
195 | * errno otherwise. |
196 | * Use cpm_muram_addr() to get the virtual address of the area. |
197 | * Use cpm_muram_free() to free the allocation. |
198 | */ |
199 | s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) |
200 | { |
201 | s32 start; |
202 | unsigned long flags; |
203 | struct genpool_data_fixed muram_pool_data_fixed; |
204 | |
205 | spin_lock_irqsave(&cpm_muram_lock, flags); |
206 | muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET; |
207 | start = cpm_muram_alloc_common(size, algo: gen_pool_fixed_alloc, |
208 | data: &muram_pool_data_fixed); |
209 | spin_unlock_irqrestore(lock: &cpm_muram_lock, flags); |
210 | return start; |
211 | } |
212 | EXPORT_SYMBOL(cpm_muram_alloc_fixed); |
213 | |
214 | /** |
215 | * cpm_muram_addr - turn a muram offset into a virtual address |
216 | * @offset: muram offset to convert |
217 | */ |
218 | void __iomem *cpm_muram_addr(unsigned long offset) |
219 | { |
220 | return muram_vbase + offset; |
221 | } |
222 | EXPORT_SYMBOL(cpm_muram_addr); |
223 | |
224 | unsigned long cpm_muram_offset(const void __iomem *addr) |
225 | { |
226 | return addr - muram_vbase; |
227 | } |
228 | EXPORT_SYMBOL(cpm_muram_offset); |
229 | |
230 | /** |
231 | * cpm_muram_dma - turn a muram virtual address into a DMA address |
232 | * @addr: virtual address from cpm_muram_addr() to convert |
233 | */ |
234 | dma_addr_t cpm_muram_dma(void __iomem *addr) |
235 | { |
236 | return muram_pbase + (addr - muram_vbase); |
237 | } |
238 | EXPORT_SYMBOL(cpm_muram_dma); |
239 | |
240 | /* |
241 | * As cpm_muram_free, but takes the virtual address rather than the |
242 | * muram offset. |
243 | */ |
244 | void cpm_muram_free_addr(const void __iomem *addr) |
245 | { |
246 | if (!addr) |
247 | return; |
248 | cpm_muram_free(cpm_muram_offset(addr)); |
249 | } |
250 | EXPORT_SYMBOL(cpm_muram_free_addr); |
251 | |