1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
7 | * |
8 | * It is safe to use the allocator in NMI handlers and other special |
9 | * unblockable contexts that could otherwise deadlock on locks. This |
10 | * is implemented by using atomic operations and retries on any |
11 | * conflicts. The disadvantage is that there may be livelocks in |
12 | * extreme cases. For better scalability, one allocator can be used |
13 | * for each CPU. |
14 | * |
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
19 | * |
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
25 | * |
26 | * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org> |
27 | */ |
28 | |
29 | #include <linux/slab.h> |
30 | #include <linux/export.h> |
31 | #include <linux/bitmap.h> |
32 | #include <linux/rculist.h> |
33 | #include <linux/interrupt.h> |
34 | #include <linux/genalloc.h> |
35 | #include <linux/of.h> |
36 | #include <linux/of_platform.h> |
37 | #include <linux/platform_device.h> |
38 | #include <linux/vmalloc.h> |
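
/*
 * Illustrative usage (a minimal sketch, not part of this file): a
 * typical caller creates a pool, seeds it with a chunk of its special
 * memory, and allocates from it. The granule order, base and sizes
 * below are made-up values:
 *
 *	struct gen_pool *pool;
 *	unsigned long addr;
 *
 *	pool = gen_pool_create(5, -1);			// 32-byte granules
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add(pool, sram_base, sram_size, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	addr = gen_pool_alloc(pool, 256);
 *	...
 *	gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);
 */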
39 | |
40 | static inline size_t chunk_size(const struct gen_pool_chunk *chunk) |
41 | { |
42 | return chunk->end_addr - chunk->start_addr + 1; |
43 | } |
44 | |
45 | static inline int |
46 | set_bits_ll(unsigned long *addr, unsigned long mask_to_set) |
47 | { |
48 | unsigned long val = READ_ONCE(*addr); |
49 | |
50 | do { |
51 | if (val & mask_to_set) |
52 | return -EBUSY; |
53 | cpu_relax(); |
54 | } while (!try_cmpxchg(addr, &val, val | mask_to_set)); |
55 | |
56 | return 0; |
57 | } |
58 | |
59 | static inline int |
60 | clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) |
61 | { |
62 | unsigned long val = READ_ONCE(*addr); |
63 | |
64 | do { |
65 | if ((val & mask_to_clear) != mask_to_clear) |
66 | return -EBUSY; |
67 | cpu_relax(); |
68 | } while (!try_cmpxchg(addr, &val, val & ~mask_to_clear)); |
69 | |
70 | return 0; |
71 | } |
72 | |
73 | /* |
74 | * bitmap_set_ll - set the specified number of bits at the specified position |
75 | * @map: pointer to a bitmap |
76 | * @start: a bit position in @map |
77 | * @nr: number of bits to set |
78 | * |
 * Set @nr bits starting from @start in @map locklessly. Several
 * users can set/clear the same bitmap simultaneously without a lock.
 * If two users set the same bit, one of them loses the race and
 * returns the number of remaining bits; otherwise 0 is returned.
83 | */ |
84 | static unsigned long |
85 | bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr) |
86 | { |
87 | unsigned long *p = map + BIT_WORD(start); |
88 | const unsigned long size = start + nr; |
89 | int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); |
90 | unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); |
91 | |
92 | while (nr >= bits_to_set) { |
		if (set_bits_ll(p, mask_to_set))
94 | return nr; |
95 | nr -= bits_to_set; |
96 | bits_to_set = BITS_PER_LONG; |
97 | mask_to_set = ~0UL; |
98 | p++; |
99 | } |
100 | if (nr) { |
101 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); |
		if (set_bits_ll(p, mask_to_set))
103 | return nr; |
104 | } |
105 | |
106 | return 0; |
107 | } |
108 | |
109 | /* |
110 | * bitmap_clear_ll - clear the specified number of bits at the specified position |
111 | * @map: pointer to a bitmap |
112 | * @start: a bit position in @map |
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several
 * users can set/clear the same bitmap simultaneously without a lock.
 * If two users clear the same bit, one of them loses the race and
 * returns the number of remaining bits; otherwise 0 is returned.
119 | */ |
120 | static unsigned long |
121 | bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr) |
122 | { |
123 | unsigned long *p = map + BIT_WORD(start); |
124 | const unsigned long size = start + nr; |
125 | int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); |
126 | unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); |
127 | |
128 | while (nr >= bits_to_clear) { |
		if (clear_bits_ll(p, mask_to_clear))
130 | return nr; |
131 | nr -= bits_to_clear; |
132 | bits_to_clear = BITS_PER_LONG; |
133 | mask_to_clear = ~0UL; |
134 | p++; |
135 | } |
136 | if (nr) { |
137 | mask_to_clear &= BITMAP_LAST_WORD_MASK(size); |
		if (clear_bits_ll(p, mask_to_clear))
139 | return nr; |
140 | } |
141 | |
142 | return 0; |
143 | } |
144 | |
145 | /** |
146 | * gen_pool_create - create a new special memory pool |
147 | * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents |
148 | * @nid: node id of the node the pool structure should be allocated on, or -1 |
149 | * |
150 | * Create a new special memory pool that can be used to manage special purpose |
151 | * memory not managed by the regular kmalloc/kfree interface. |
152 | */ |
153 | struct gen_pool *gen_pool_create(int min_alloc_order, int nid) |
154 | { |
155 | struct gen_pool *pool; |
156 | |
	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
158 | if (pool != NULL) { |
159 | spin_lock_init(&pool->lock); |
		INIT_LIST_HEAD(&pool->chunks);
161 | pool->min_alloc_order = min_alloc_order; |
162 | pool->algo = gen_pool_first_fit; |
163 | pool->data = NULL; |
164 | pool->name = NULL; |
165 | } |
166 | return pool; |
167 | } |
168 | EXPORT_SYMBOL(gen_pool_create); |
169 | |
170 | /** |
 * gen_pool_add_owner - add a new chunk of special memory to the pool
172 | * @pool: pool to add new memory chunk to |
173 | * @virt: virtual starting address of memory chunk to add to pool |
174 | * @phys: physical starting address of memory chunk to add to pool |
175 | * @size: size in bytes of the memory chunk to add to pool |
176 | * @nid: node id of the node the chunk structure and bitmap should be |
177 | * allocated on, or -1 |
178 | * @owner: private data the publisher would like to recall at alloc time |
179 | * |
180 | * Add a new chunk of special memory to the specified pool. |
181 | * |
182 | * Returns 0 on success or a -ve errno on failure. |
183 | */ |
184 | int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, |
185 | size_t size, int nid, void *owner) |
186 | { |
187 | struct gen_pool_chunk *chunk; |
188 | unsigned long nbits = size >> pool->min_alloc_order; |
189 | unsigned long nbytes = sizeof(struct gen_pool_chunk) + |
190 | BITS_TO_LONGS(nbits) * sizeof(long); |
191 | |
	chunk = vzalloc_node(nbytes, nid);
193 | if (unlikely(chunk == NULL)) |
194 | return -ENOMEM; |
195 | |
196 | chunk->phys_addr = phys; |
197 | chunk->start_addr = virt; |
198 | chunk->end_addr = virt + size - 1; |
199 | chunk->owner = owner; |
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);
205 | |
206 | return 0; |
207 | } |
208 | EXPORT_SYMBOL(gen_pool_add_owner); |
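
/*
 * Note: the more common entry points gen_pool_add() and
 * gen_pool_add_virt() are static inline wrappers around this function
 * in <linux/genalloc.h>, passing a NULL owner (and, for
 * gen_pool_add(), -1 as the physical address). A minimal sketch,
 * assuming a chunk whose physical and virtual addresses differ:
 *
 *	rc = gen_pool_add_virt(pool, (unsigned long)vaddr, paddr,
 *			       sram_size, -1);
 */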
209 | |
210 | /** |
211 | * gen_pool_virt_to_phys - return the physical address of memory |
212 | * @pool: pool to allocate from |
213 | * @addr: starting address of memory |
214 | * |
215 | * Returns the physical address on success, or -1 on error. |
216 | */ |
217 | phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr) |
218 | { |
219 | struct gen_pool_chunk *chunk; |
220 | phys_addr_t paddr = -1; |
221 | |
222 | rcu_read_lock(); |
223 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { |
224 | if (addr >= chunk->start_addr && addr <= chunk->end_addr) { |
225 | paddr = chunk->phys_addr + (addr - chunk->start_addr); |
226 | break; |
227 | } |
228 | } |
229 | rcu_read_unlock(); |
230 | |
231 | return paddr; |
232 | } |
233 | EXPORT_SYMBOL(gen_pool_virt_to_phys); |
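
/*
 * Illustrative sketch: recovering the physical address of a buffer
 * allocated from a pool whose chunks were added with
 * gen_pool_add_virt():
 *
 *	vaddr = gen_pool_alloc(pool, len);
 *	if (vaddr)
 *		paddr = gen_pool_virt_to_phys(pool, vaddr);
 */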
234 | |
235 | /** |
236 | * gen_pool_destroy - destroy a special memory pool |
237 | * @pool: pool to destroy |
238 | * |
239 | * Destroy the specified special memory pool. Verifies that there are no |
240 | * outstanding allocations. |
241 | */ |
242 | void gen_pool_destroy(struct gen_pool *pool) |
243 | { |
244 | struct list_head *_chunk, *_next_chunk; |
245 | struct gen_pool_chunk *chunk; |
246 | int order = pool->min_alloc_order; |
247 | unsigned long bit, end_bit; |
248 | |
249 | list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { |
250 | chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); |
		list_del(&chunk->next_chunk);
252 | |
253 | end_bit = chunk_size(chunk) >> order; |
		bit = find_first_bit(chunk->bits, end_bit);
255 | BUG_ON(bit < end_bit); |
256 | |
		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
261 | } |
262 | EXPORT_SYMBOL(gen_pool_destroy); |
263 | |
264 | /** |
265 | * gen_pool_alloc_algo_owner - allocate special memory from the pool |
266 | * @pool: pool to allocate from |
267 | * @size: number of bytes to allocate from the pool |
268 | * @algo: algorithm passed from caller |
269 | * @data: data passed to algorithm |
270 | * @owner: optionally retrieve the chunk owner |
271 | * |
272 | * Allocate the requested number of bytes from the specified pool. |
273 | * Uses the pool allocation function (with first-fit algorithm by default). |
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
276 | */ |
277 | unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, |
278 | genpool_algo_t algo, void *data, void **owner) |
279 | { |
280 | struct gen_pool_chunk *chunk; |
281 | unsigned long addr = 0; |
282 | int order = pool->min_alloc_order; |
283 | unsigned long nbits, start_bit, end_bit, remain; |
284 | |
285 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG |
286 | BUG_ON(in_nmi()); |
287 | #endif |
288 | |
289 | if (owner) |
290 | *owner = NULL; |
291 | |
292 | if (size == 0) |
293 | return 0; |
294 | |
295 | nbits = (size + (1UL << order) - 1) >> order; |
296 | rcu_read_lock(); |
297 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { |
		if (size > atomic_long_read(&chunk->avail))
299 | continue; |
300 | |
301 | start_bit = 0; |
302 | end_bit = chunk_size(chunk) >> order; |
303 | retry: |
304 | start_bit = algo(chunk->bits, end_bit, start_bit, |
305 | nbits, data, pool, chunk->start_addr); |
306 | if (start_bit >= end_bit) |
307 | continue; |
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
309 | if (remain) { |
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
312 | BUG_ON(remain); |
313 | goto retry; |
314 | } |
315 | |
316 | addr = chunk->start_addr + ((unsigned long)start_bit << order); |
317 | size = nbits << order; |
		atomic_long_sub(size, &chunk->avail);
319 | if (owner) |
320 | *owner = chunk->owner; |
321 | break; |
322 | } |
323 | rcu_read_unlock(); |
324 | return addr; |
325 | } |
326 | EXPORT_SYMBOL(gen_pool_alloc_algo_owner); |
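
/*
 * Sketch of a caller that also wants the owner pointer back (the
 * private data stashed at gen_pool_add_owner() time), via the
 * gen_pool_alloc_owner() wrapper from <linux/genalloc.h>:
 *
 *	void *owner;
 *	unsigned long addr = gen_pool_alloc_owner(pool, len, &owner);
 *
 *	if (addr)
 *		// owner now holds the publisher's private data
 */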
327 | |
328 | /** |
329 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage |
330 | * @pool: pool to allocate from |
331 | * @size: number of bytes to allocate from the pool |
332 | * @dma: dma-view physical address return value. Use %NULL if unneeded. |
333 | * |
334 | * Allocate the requested number of bytes from the specified pool. |
335 | * Uses the pool allocation function (with first-fit algorithm by default). |
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
338 | * |
339 | * Return: virtual address of the allocated memory, or %NULL on failure |
340 | */ |
341 | void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) |
342 | { |
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
344 | } |
345 | EXPORT_SYMBOL(gen_pool_dma_alloc); |
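
/*
 * Illustrative sketch: carving a device-visible buffer out of an SRAM
 * pool and programming its DMA address into hardware (the register
 * offset EXAMPLE_DESC_BASE is made up):
 *
 *	dma_addr_t dma;
 *	void *buf = gen_pool_dma_alloc(pool, 512, &dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	writel(lower_32_bits(dma), regs + EXAMPLE_DESC_BASE);
 */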
346 | |
347 | /** |
348 | * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA |
349 | * usage with the given pool algorithm |
350 | * @pool: pool to allocate from |
351 | * @size: number of bytes to allocate from the pool |
352 | * @dma: DMA-view physical address return value. Use %NULL if unneeded. |
353 | * @algo: algorithm passed from caller |
354 | * @data: data passed to algorithm |
355 | * |
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
359 | * |
360 | * Return: virtual address of the allocated memory, or %NULL on failure |
361 | */ |
362 | void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size, |
363 | dma_addr_t *dma, genpool_algo_t algo, void *data) |
364 | { |
365 | unsigned long vaddr; |
366 | |
367 | if (!pool) |
368 | return NULL; |
369 | |
370 | vaddr = gen_pool_alloc_algo(pool, size, algo, data); |
371 | if (!vaddr) |
372 | return NULL; |
373 | |
374 | if (dma) |
375 | *dma = gen_pool_virt_to_phys(pool, vaddr); |
376 | |
377 | return (void *)vaddr; |
378 | } |
379 | EXPORT_SYMBOL(gen_pool_dma_alloc_algo); |
380 | |
381 | /** |
382 | * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA |
383 | * usage with the given alignment |
384 | * @pool: pool to allocate from |
385 | * @size: number of bytes to allocate from the pool |
386 | * @dma: DMA-view physical address return value. Use %NULL if unneeded. |
387 | * @align: alignment in bytes for starting address |
388 | * |
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
392 | * |
393 | * Return: virtual address of the allocated memory, or %NULL on failure |
394 | */ |
395 | void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size, |
396 | dma_addr_t *dma, int align) |
397 | { |
398 | struct genpool_data_align data = { .align = align }; |
399 | |
400 | return gen_pool_dma_alloc_algo(pool, size, dma, |
401 | gen_pool_first_fit_align, &data); |
402 | } |
403 | EXPORT_SYMBOL(gen_pool_dma_alloc_align); |
404 | |
405 | /** |
406 | * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for |
407 | * DMA usage |
408 | * @pool: pool to allocate from |
409 | * @size: number of bytes to allocate from the pool |
410 | * @dma: dma-view physical address return value. Use %NULL if unneeded. |
411 | * |
412 | * Allocate the requested number of zeroed bytes from the specified pool. |
413 | * Uses the pool allocation function (with first-fit algorithm by default). |
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
416 | * |
417 | * Return: virtual address of the allocated zeroed memory, or %NULL on failure |
418 | */ |
419 | void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) |
420 | { |
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
422 | } |
423 | EXPORT_SYMBOL(gen_pool_dma_zalloc); |
424 | |
425 | /** |
426 | * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for |
427 | * DMA usage with the given pool algorithm |
428 | * @pool: pool to allocate from |
429 | * @size: number of bytes to allocate from the pool |
430 | * @dma: DMA-view physical address return value. Use %NULL if unneeded. |
431 | * @algo: algorithm passed from caller |
432 | * @data: data passed to algorithm |
433 | * |
434 | * Allocate the requested number of zeroed bytes from the specified pool. Uses |
 * the given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
437 | * |
438 | * Return: virtual address of the allocated zeroed memory, or %NULL on failure |
439 | */ |
440 | void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size, |
441 | dma_addr_t *dma, genpool_algo_t algo, void *data) |
442 | { |
443 | void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data); |
444 | |
445 | if (vaddr) |
446 | memset(vaddr, 0, size); |
447 | |
448 | return vaddr; |
449 | } |
450 | EXPORT_SYMBOL(gen_pool_dma_zalloc_algo); |
451 | |
452 | /** |
453 | * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for |
454 | * DMA usage with the given alignment |
455 | * @pool: pool to allocate from |
456 | * @size: number of bytes to allocate from the pool |
457 | * @dma: DMA-view physical address return value. Use %NULL if unneeded. |
458 | * @align: alignment in bytes for starting address |
459 | * |
460 | * Allocate the requested number of zeroed bytes from the specified pool, |
 * with the given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
463 | * |
464 | * Return: virtual address of the allocated zeroed memory, or %NULL on failure |
465 | */ |
466 | void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, |
467 | dma_addr_t *dma, int align) |
468 | { |
469 | struct genpool_data_align data = { .align = align }; |
470 | |
471 | return gen_pool_dma_zalloc_algo(pool, size, dma, |
472 | gen_pool_first_fit_align, &data); |
473 | } |
474 | EXPORT_SYMBOL(gen_pool_dma_zalloc_align); |
475 | |
476 | /** |
477 | * gen_pool_free_owner - free allocated special memory back to the pool |
478 | * @pool: pool to free to |
479 | * @addr: starting address of memory to free back to pool |
480 | * @size: size in bytes of memory to free |
481 | * @owner: private data stashed at gen_pool_add() time |
482 | * |
483 | * Free previously allocated special memory back to the specified |
 * pool. Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
486 | */ |
487 | void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, |
488 | void **owner) |
489 | { |
490 | struct gen_pool_chunk *chunk; |
491 | int order = pool->min_alloc_order; |
492 | unsigned long start_bit, nbits, remain; |
493 | |
494 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG |
495 | BUG_ON(in_nmi()); |
496 | #endif |
497 | |
498 | if (owner) |
499 | *owner = NULL; |
500 | |
501 | nbits = (size + (1UL << order) - 1) >> order; |
502 | rcu_read_lock(); |
503 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { |
504 | if (addr >= chunk->start_addr && addr <= chunk->end_addr) { |
505 | BUG_ON(addr + size - 1 > chunk->end_addr); |
506 | start_bit = (addr - chunk->start_addr) >> order; |
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
508 | BUG_ON(remain); |
509 | size = nbits << order; |
			atomic_long_add(size, &chunk->avail);
511 | if (owner) |
512 | *owner = chunk->owner; |
513 | rcu_read_unlock(); |
514 | return; |
515 | } |
516 | } |
517 | rcu_read_unlock(); |
518 | BUG(); |
519 | } |
520 | EXPORT_SYMBOL(gen_pool_free_owner); |
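
/*
 * The common gen_pool_free(pool, addr, size) is a static inline
 * wrapper around this function with a NULL owner (see
 * <linux/genalloc.h>). Note that freeing an address that lies outside
 * every chunk hits the BUG() above, i.e. it is treated as a fatal
 * caller error:
 *
 *	gen_pool_free(pool, addr, 256);
 */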
521 | |
522 | /** |
523 | * gen_pool_for_each_chunk - call func for every chunk of generic memory pool |
524 | * @pool: the generic memory pool |
525 | * @func: func to call |
526 | * @data: additional data used by @func |
527 | * |
 * Call @func for every chunk of the generic memory pool. @func is
 * called with rcu_read_lock() held.
530 | */ |
531 | void gen_pool_for_each_chunk(struct gen_pool *pool, |
532 | void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), |
533 | void *data) |
534 | { |
535 | struct gen_pool_chunk *chunk; |
536 | |
537 | rcu_read_lock(); |
538 | list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) |
539 | func(pool, chunk, data); |
540 | rcu_read_unlock(); |
541 | } |
542 | EXPORT_SYMBOL(gen_pool_for_each_chunk); |
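
/*
 * Sketch of a per-chunk callback (illustrative): summing the managed
 * size by hand, much as gen_pool_size() does below:
 *
 *	static void add_chunk_size(struct gen_pool *pool,
 *				   struct gen_pool_chunk *chunk, void *data)
 *	{
 *		*(size_t *)data += chunk->end_addr - chunk->start_addr + 1;
 *	}
 *
 *	size_t total = 0;
 *	gen_pool_for_each_chunk(pool, add_chunk_size, &total);
 */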
543 | |
544 | /** |
545 | * gen_pool_has_addr - checks if an address falls within the range of a pool |
546 | * @pool: the generic memory pool |
547 | * @start: start address |
548 | * @size: size of the region |
549 | * |
550 | * Check if the range of addresses falls within the specified pool. Returns |
551 | * true if the entire range is contained in the pool and false otherwise. |
552 | */ |
553 | bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start, |
554 | size_t size) |
555 | { |
556 | bool found = false; |
557 | unsigned long end = start + size - 1; |
558 | struct gen_pool_chunk *chunk; |
559 | |
560 | rcu_read_lock(); |
561 | list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { |
562 | if (start >= chunk->start_addr && start <= chunk->end_addr) { |
563 | if (end <= chunk->end_addr) { |
564 | found = true; |
565 | break; |
566 | } |
567 | } |
568 | } |
569 | rcu_read_unlock(); |
570 | return found; |
571 | } |
572 | EXPORT_SYMBOL(gen_pool_has_addr); |
573 | |
574 | /** |
575 | * gen_pool_avail - get available free space of the pool |
576 | * @pool: pool to get available free space |
577 | * |
578 | * Return available free space of the specified pool. |
579 | */ |
580 | size_t gen_pool_avail(struct gen_pool *pool) |
581 | { |
582 | struct gen_pool_chunk *chunk; |
583 | size_t avail = 0; |
584 | |
585 | rcu_read_lock(); |
586 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) |
		avail += atomic_long_read(&chunk->avail);
588 | rcu_read_unlock(); |
589 | return avail; |
590 | } |
591 | EXPORT_SYMBOL_GPL(gen_pool_avail); |
592 | |
593 | /** |
594 | * gen_pool_size - get size in bytes of memory managed by the pool |
595 | * @pool: pool to get size |
596 | * |
597 | * Return size in bytes of memory managed by the pool. |
598 | */ |
599 | size_t gen_pool_size(struct gen_pool *pool) |
600 | { |
601 | struct gen_pool_chunk *chunk; |
602 | size_t size = 0; |
603 | |
604 | rcu_read_lock(); |
605 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) |
606 | size += chunk_size(chunk); |
607 | rcu_read_unlock(); |
608 | return size; |
609 | } |
610 | EXPORT_SYMBOL_GPL(gen_pool_size); |
611 | |
612 | /** |
613 | * gen_pool_set_algo - set the allocation algorithm |
614 | * @pool: pool to change allocation algorithm |
615 | * @algo: custom algorithm function |
616 | * @data: additional data used by @algo |
617 | * |
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, gen_pool_first_fit is used as the default
 * memory allocation function.
621 | */ |
622 | void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data) |
623 | { |
624 | rcu_read_lock(); |
625 | |
626 | pool->algo = algo; |
627 | if (!pool->algo) |
628 | pool->algo = gen_pool_first_fit; |
629 | |
630 | pool->data = data; |
631 | |
632 | rcu_read_unlock(); |
633 | } |
634 | EXPORT_SYMBOL(gen_pool_set_algo); |
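
/*
 * Example (sketch): switching a pool to the best-fit policy right
 * after creation, before allocations start:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */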
635 | |
636 | /** |
637 | * gen_pool_first_fit - find the first available region |
638 | * of memory matching the size requirement (no alignment constraint) |
639 | * @map: The address to base the search on |
640 | * @size: The bitmap size in bits |
641 | * @start: The bitnumber to start searching at |
642 | * @nr: The number of zeroed bits we're looking for |
643 | * @data: additional data - unused |
644 | * @pool: pool to find the fit region memory from |
645 | * @start_addr: not used in this function |
646 | */ |
647 | unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, |
648 | unsigned long start, unsigned int nr, void *data, |
649 | struct gen_pool *pool, unsigned long start_addr) |
650 | { |
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
652 | } |
653 | EXPORT_SYMBOL(gen_pool_first_fit); |
654 | |
655 | /** |
656 | * gen_pool_first_fit_align - find the first available region |
657 | * of memory matching the size requirement (alignment constraint) |
658 | * @map: The address to base the search on |
659 | * @size: The bitmap size in bits |
660 | * @start: The bitnumber to start searching at |
661 | * @nr: The number of zeroed bits we're looking for |
662 | * @data: data for alignment |
663 | * @pool: pool to get order from |
 * @start_addr: start addr of allocation chunk
665 | */ |
666 | unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, |
667 | unsigned long start, unsigned int nr, void *data, |
668 | struct gen_pool *pool, unsigned long start_addr) |
669 | { |
670 | struct genpool_data_align *alignment; |
671 | unsigned long align_mask, align_off; |
672 | int order; |
673 | |
674 | alignment = data; |
675 | order = pool->min_alloc_order; |
676 | align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; |
677 | align_off = (start_addr & (alignment->align - 1)) >> order; |
678 | |
	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
681 | } |
682 | EXPORT_SYMBOL(gen_pool_first_fit_align); |
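
/*
 * Sketch of direct use (values illustrative): requesting a 4 KiB
 * aligned region through gen_pool_alloc_algo():
 *
 *	struct genpool_data_align align_data = { .align = SZ_4K };
 *	unsigned long addr = gen_pool_alloc_algo(pool, len,
 *			gen_pool_first_fit_align, &align_data);
 */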
683 | |
684 | /** |
685 | * gen_pool_fixed_alloc - reserve a specific region |
686 | * @map: The address to base the search on |
687 | * @size: The bitmap size in bits |
688 | * @start: The bitnumber to start searching at |
689 | * @nr: The number of zeroed bits we're looking for |
690 | * @data: data for alignment |
691 | * @pool: pool to get order from |
692 | * @start_addr: not used in this function |
693 | */ |
694 | unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, |
695 | unsigned long start, unsigned int nr, void *data, |
696 | struct gen_pool *pool, unsigned long start_addr) |
697 | { |
698 | struct genpool_data_fixed *fixed_data; |
699 | int order; |
700 | unsigned long offset_bit; |
701 | unsigned long start_bit; |
702 | |
703 | fixed_data = data; |
704 | order = pool->min_alloc_order; |
705 | offset_bit = fixed_data->offset >> order; |
706 | if (WARN_ON(fixed_data->offset & ((1UL << order) - 1))) |
707 | return size; |
708 | |
	start_bit = bitmap_find_next_zero_area(map, size,
					       start + offset_bit, nr, 0);
711 | if (start_bit != offset_bit) |
712 | start_bit = size; |
713 | return start_bit; |
714 | } |
715 | EXPORT_SYMBOL(gen_pool_fixed_alloc); |
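
/*
 * Illustrative sketch: reserving the region at a fixed offset from
 * the start of the chunk (the offset must be a multiple of the
 * allocation granule, or this algorithm fails with a WARN):
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0x100 };
 *	unsigned long addr = gen_pool_alloc_algo(pool, len,
 *			gen_pool_fixed_alloc, &fixed_data);
 */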
716 | |
717 | /** |
718 | * gen_pool_first_fit_order_align - find the first available region |
719 | * of memory matching the size requirement. The region will be aligned |
720 | * to the order of the size specified. |
721 | * @map: The address to base the search on |
722 | * @size: The bitmap size in bits |
723 | * @start: The bitnumber to start searching at |
724 | * @nr: The number of zeroed bits we're looking for |
725 | * @data: additional data - unused |
726 | * @pool: pool to find the fit region memory from |
727 | * @start_addr: not used in this function |
728 | */ |
729 | unsigned long gen_pool_first_fit_order_align(unsigned long *map, |
730 | unsigned long size, unsigned long start, |
731 | unsigned int nr, void *data, struct gen_pool *pool, |
732 | unsigned long start_addr) |
733 | { |
734 | unsigned long align_mask = roundup_pow_of_two(nr) - 1; |
735 | |
736 | return bitmap_find_next_zero_area(map, size, start, nr, align_mask); |
737 | } |
738 | EXPORT_SYMBOL(gen_pool_first_fit_order_align); |
739 | |
740 | /** |
741 | * gen_pool_best_fit - find the best fitting region of memory |
742 | * matching the size requirement (no alignment constraint) |
743 | * @map: The address to base the search on |
744 | * @size: The bitmap size in bits |
745 | * @start: The bitnumber to start searching at |
746 | * @nr: The number of zeroed bits we're looking for |
747 | * @data: additional data - unused |
748 | * @pool: pool to find the fit region memory from |
749 | * @start_addr: not used in this function |
750 | * |
 * Iterate over the bitmap to find the smallest free region in which
 * the memory can be allocated.
753 | */ |
754 | unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, |
755 | unsigned long start, unsigned int nr, void *data, |
756 | struct gen_pool *pool, unsigned long start_addr) |
757 | { |
758 | unsigned long start_bit = size; |
759 | unsigned long len = size + 1; |
760 | unsigned long index; |
761 | |
	index = bitmap_find_next_zero_area(map, size, start, nr, 0);
763 | |
764 | while (index < size) { |
		unsigned long next_bit = find_next_bit(map, size, index + nr);
766 | if ((next_bit - index) < len) { |
767 | len = next_bit - index; |
768 | start_bit = index; |
769 | if (len == nr) |
770 | return start_bit; |
771 | } |
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
774 | } |
775 | |
776 | return start_bit; |
777 | } |
778 | EXPORT_SYMBOL(gen_pool_best_fit); |
779 | |
780 | static void devm_gen_pool_release(struct device *dev, void *res) |
781 | { |
782 | gen_pool_destroy(*(struct gen_pool **)res); |
783 | } |
784 | |
785 | static int devm_gen_pool_match(struct device *dev, void *res, void *data) |
786 | { |
787 | struct gen_pool **p = res; |
788 | |
789 | /* NULL data matches only a pool without an assigned name */ |
790 | if (!data && !(*p)->name) |
791 | return 1; |
792 | |
793 | if (!data || !(*p)->name) |
794 | return 0; |
795 | |
796 | return !strcmp((*p)->name, data); |
797 | } |
798 | |
799 | /** |
800 | * gen_pool_get - Obtain the gen_pool (if any) for a device |
801 | * @dev: device to retrieve the gen_pool from |
802 | * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device |
803 | * |
804 | * Returns the gen_pool for the device if one is present, or NULL. |
805 | */ |
806 | struct gen_pool *gen_pool_get(struct device *dev, const char *name) |
807 | { |
808 | struct gen_pool **p; |
809 | |
	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
812 | if (!p) |
813 | return NULL; |
814 | return *p; |
815 | } |
816 | EXPORT_SYMBOL_GPL(gen_pool_get); |
817 | |
818 | /** |
819 | * devm_gen_pool_create - managed gen_pool_create |
820 | * @dev: device that provides the gen_pool |
821 | * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents |
822 | * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes |
823 | * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device |
824 | * |
825 | * Create a new special memory pool that can be used to manage special purpose |
826 | * memory not managed by the regular kmalloc/kfree interface. The pool will be |
827 | * automatically destroyed by the device management code. |
828 | */ |
829 | struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, |
830 | int nid, const char *name) |
831 | { |
832 | struct gen_pool **ptr, *pool; |
833 | const char *pool_name = NULL; |
834 | |
835 | /* Check that genpool to be created is uniquely addressed on device */ |
836 | if (gen_pool_get(dev, name)) |
		return ERR_PTR(-EINVAL);
838 | |
839 | if (name) { |
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
843 | } |
844 | |
845 | ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); |
846 | if (!ptr) |
847 | goto free_pool_name; |
848 | |
849 | pool = gen_pool_create(min_alloc_order, nid); |
850 | if (!pool) |
851 | goto free_devres; |
852 | |
853 | *ptr = pool; |
854 | pool->name = pool_name; |
	devres_add(dev, ptr);
856 | |
857 | return pool; |
858 | |
859 | free_devres: |
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
865 | } |
866 | EXPORT_SYMBOL(devm_gen_pool_create); |
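
/*
 * Sketch of managed use in a driver probe (names illustrative):
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					    NUMA_NO_NODE, "example-sram");
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *		// pool is destroyed automatically on driver detach
 *		return 0;
 *	}
 */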
867 | |
868 | #ifdef CONFIG_OF |
869 | /** |
870 | * of_gen_pool_get - find a pool by phandle property |
871 | * @np: device node |
872 | * @propname: property name containing phandle(s) |
873 | * @index: index into the phandle array |
874 | * |
875 | * Returns the pool that contains the chunk starting at the physical |
876 | * address of the device tree node pointed at by the phandle property, |
877 | * or NULL if not found. |
878 | */ |
879 | struct gen_pool *of_gen_pool_get(struct device_node *np, |
880 | const char *propname, int index) |
881 | { |
882 | struct platform_device *pdev; |
883 | struct device_node *np_pool, *parent; |
884 | const char *name = NULL; |
885 | struct gen_pool *pool = NULL; |
886 | |
	np_pool = of_parse_phandle(np, propname, index);
888 | if (!np_pool) |
889 | return NULL; |
890 | |
	pdev = of_find_device_by_node(np_pool);
892 | if (!pdev) { |
893 | /* Check if named gen_pool is created by parent node device */ |
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = of_node_full_name(np_pool);
901 | } |
902 | if (pdev) |
903 | pool = gen_pool_get(&pdev->dev, name); |
	of_node_put(np_pool);
905 | |
906 | return pool; |
907 | } |
908 | EXPORT_SYMBOL_GPL(of_gen_pool_get); |
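
/*
 * Illustrative device-tree usage (a sketch; node and property names
 * are made up): a client node referring to a pool by phandle,
 *
 *	client {
 *		sram = <&example_pool>;
 *	};
 *
 * would be resolved from the client driver as:
 *
 *	pool = of_gen_pool_get(dev->of_node, "sram", 0);
 */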
909 | #endif /* CONFIG_OF */ |
910 | |