// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#define MAX_RESERVED_REGIONS	32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_find_in_range(start, end, size, align);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);

	return memblock_reserve(base, size);
}
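
/*
 * Illustrative sketch (not part of this file): the two outcomes above differ
 * in how the found range is recorded. Assuming a 1 MiB region placed
 * somewhere in [0x40000000, 0x50000000) (addresses are hypothetical):
 *
 *	phys_addr_t base;
 *
 *	// nomap: drop the range from memblock entirely, so the kernel
 *	// never maps it in the linear mapping.
 *	early_init_dt_alloc_reserved_memory_arch(SZ_1M, SZ_4K,
 *			0x40000000, 0x50000000, true, &base);
 *
 *	// !nomap: keep the range known to memblock but mark it reserved,
 *	// so the page allocator will never hand it out.
 *	early_init_dt_alloc_reserved_memory_arch(SZ_1M, SZ_4K,
 *			0x40000000, 0x50000000, false, &base);
 */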

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
}
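
/*
 * For reference (simplified call path as of this kernel version): saving
 * happens while drivers/of/fdt.c scans the /reserved-memory node, and the
 * second pass is fdt_init_reserved_mem() further below:
 *
 *	early_init_fdt_scan_reserved_mem()
 *	  -> of_scan_flat_dt(__fdt_scan_reserved_mem, NULL)
 *	       -> fdt_reserved_mem_save_node(...)	// first pass
 *	  -> fdt_init_reserved_mem()			// second pass
 *
 * Statically placed regions ("reg" property) are saved with base/size
 * already filled in; dynamically placed ones ("size" property) are saved
 * with base == 0 and size == 0 and allocated in the second pass.
 */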

/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	/* The alignment must also satisfy the CMA requirements. */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap) {
		unsigned long order =
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

		align = max(align, (phys_addr_t)PAGE_SIZE << order);
	}

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
					uname, &base,
					(unsigned long)size / SZ_1M);
				break;
			}
			len -= t_len;
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
					0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
				uname, &base, (unsigned long)size / SZ_1M);
	}

	if (base == 0) {
		pr_info("failed to allocate memory for node '%s'\n", uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}
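
/*
 * Illustrative example (hypothetical node names, not from this file): a
 * dynamically placed region that the parser above would accept, assuming
 * #address-cells = <1> and #size-cells = <1> in the root node:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		multimedia_pool: multimedia-pool {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;		// 64 MiB, required
 *			alignment = <0x400000>;		// optional
 *			alloc-ranges = <0x40000000 0x20000000>; // optional
 *		};
 *	};
 *
 * "size" is mandatory for dynamic placement; "alignment" and "alloc-ranges"
 * only constrain where the allocation may land.
 */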

static const struct of_device_id __rmem_of_table_sentinel
	__used __section(__reservedmem_of_table_end);

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		if (initfn(rmem) == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			return 0;
		}
	}
	return -ENOENT;
}
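
/*
 * Sketch of how entries land in __reservedmem_of_table: handlers register
 * themselves with the RESERVEDMEM_OF_DECLARE() macro from
 * <linux/of_reserved_mem.h>. The names below are hypothetical:
 *
 *	static int __init my_pool_init(struct reserved_mem *rmem)
 *	{
 *		// Stash ops so device_init/device_release can be invoked
 *		// later, when a device claims this region.
 *		rmem->ops = &my_pool_ops;
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_init);
 */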

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		/* Regions not yet placed (base == 0) cannot overlap. */
		if (!(this->base && next->base))
			continue;
		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;

		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len / 4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
					&rmem->base, &rmem->size);
		if (err == 0)
			__reserved_mem_init_node(rmem);
	}
}

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns the DMA-mapping operations appropriate for the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * a region for each of them.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
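
/*
 * Illustrative driver-side usage (hypothetical node names and address).
 * With a consumer node like:
 *
 *	video@12345000 {
 *		memory-region = <&multimedia_pool>;
 *	};
 *
 * a driver's probe would claim the first listed region with:
 *
 *	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
 *	if (ret && ret != -ENODEV)
 *		return ret;
 *
 * and undo it with of_reserved_mem_device_release(dev) on the remove or
 * error path.
 */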

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd;
	struct reserved_mem *rmem = NULL;

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev) {
			rmem = rd->rmem;
			list_del(&rd->list);
			kfree(rd);
			break;
		}
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	if (!rmem || !rmem->ops || !rmem->ops->device_release)
		return;

	rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:	node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
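
/*
 * Illustrative usage (a common pattern, not from this file): look up the
 * region a consumer node points at without binding it to a device:
 *
 *	struct device_node *mem_np;
 *	struct reserved_mem *rmem;
 *
 *	mem_np = of_parse_phandle(np, "memory-region", 0);
 *	if (!mem_np)
 *		return -ENODEV;
 *	rmem = of_reserved_mem_lookup(mem_np);
 *	of_node_put(mem_np);
 *	if (!rmem)
 *		return -ENODEV;
 *	// rmem->base and rmem->size describe the carved-out range
 */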
398 | |