// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *  Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
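
/*
 * Illustrative usage sketch, not part of this allocator (the device, names
 * and sizes below are hypothetical):
 *
 *	pool = dma_pool_create("foo-blocks", &pdev->dev, 64, 8, 0);
 *	block = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	... program 'dma' into the device, touch 'block' from the CPU ...
 *	dma_pool_free(pool, block, dma);
 *	dma_pool_destroy(pool);
 */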

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

#ifdef DMAPOOL_DEBUG
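/*
 * When DMAPOOL_DEBUG is enabled every free block is filled with
 * POOL_POISON_FREED. Before handing a block out again, verify the poison
 * is still intact (skipping the freelist header) so use-after-free
 * corruption shows up early.
 */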
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
			       data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

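/* Walk the page list to find the dma_page that covers a given dma address. */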
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

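/*
 * Sanity-check a block being freed: the dma address must fall inside one of
 * the pool's pages, and the block must not already sit on the free list
 * (a double free). Returns true if the free should be rejected.
 */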
static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

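/* Take one block off the pool-wide free list and count it as in use. */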
static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

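/* Record the block's dma address and return it to the head of the free list. */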
static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}


/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
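
/*
 * Illustrative sketch only (device, names and sizes below are hypothetical):
 * a driver needing 64-byte descriptors that must never cross a 4 KiB
 * boundary could create its pool like this:
 *
 *	pool = dma_pool_create("foo-desc", &pdev->dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */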

/*
 * Carve a freshly allocated page into blocks of pool->size bytes, skipping
 * ahead where necessary so that no block crosses a pool->boundary interval,
 * then splice the new blocks onto the pool's free list.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}

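/*
 * Allocate the housekeeping header plus a new chunk of coherent memory
 * ('allocation' bytes) from which blocks will be carved.
 */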
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);
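
/*
 * Illustrative sketch only: a typical caller pairs each allocation with a
 * matching free once the device is done with the block. The names 'cmd',
 * 'cmd_dma' and 'pool' below are hypothetical.
 *
 *	cmd = dma_pool_alloc(pool, GFP_KERNEL, &cmd_dma);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... hand cmd_dma to the device and wait for it to finish ...
 *	dma_pool_free(pool, cmd, cmd_dma);
 */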

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
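
/*
 * Illustrative sketch only (the probe context, device and sizes are
 * hypothetical): a driver using the managed variant needs no explicit
 * destroy; the pool is released automatically when the driver detaches.
 *
 *	pool = dmam_pool_create("foo-desc", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */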

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
525 | |